
burn_v2

September 24, 2019

[2]: import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score

import pandas as pd
import pickle
import random
[3]: # Preprocessing
# This function creates an image dataset after preprocessing the images.
# Images can be grayscale (dimension=1) or RGB (dimension=3).

def create_image_data(CATEGORIES, img_size, dimension, DataDir):
    database = []
    for category in CATEGORIES:
        print(category)
        path = os.path.join(DataDir, category)
        class_num = CATEGORIES.index(category)
        for img_name in tqdm(os.listdir(path)):
            try:
                if dimension == 1:
                    img = cv2.imread(os.path.join(path, img_name), cv2.IMREAD_GRAYSCALE)
                elif dimension == 3:
                    img = cv2.imread(os.path.join(path, img_name))
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                else:
                    print('Please select dimension either 1 or 3')
                img = cv2.resize(img, (img_size, img_size))
                database.append([img, class_num])
            except Exception as e:
                # unreadable or malformed files are skipped silently
                pass
    return database
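create_image_data expects one sub-folder per category inside DataDir; the folder and file names below only illustrate that layout and are not the actual dataset paths:

    Training set/
        superficial/  img_001.jpg, img_002.jpg, ...
        deep/         img_001.jpg, ...
        full/         img_001.jpg, ...

Each image is resized to img_size x img_size and appended to the dataset together with the integer index of its category in CATEGORIES.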
[4]: # Preprocessing
# This function preprocesses a single image and reshapes it into a batch of one for model.predict
def prepare(filepath, IMG_SIZE, dimension):
    if dimension == 1:
        img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
        new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    elif dimension == 3:
        img_array = cv2.imread(filepath)
        img_array = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)
        new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    else:
        print("Please select dimension either 1 or 3")
    new_array = new_array.reshape(-1, IMG_SIZE, IMG_SIZE, dimension)
    return new_array
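A minimal usage sketch, assuming IMG_SIZE, DIM and the trained model defined later in the notebook; the file path here is purely illustrative:

    # illustrative path; substitute a real image from the testing set
    sample = prepare(r'C:\some_folder\example_burn.jpg', IMG_SIZE, DIM)
    probs = model.predict(sample)                 # shape (1, len(CATEGORIES))
    print(CATEGORIES[int(np.argmax(probs[0]))])   # predicted category name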
[11]: IMG_SIZE = 200
DIM = 3
CATEGORIES = ["superficial", "deep", "full"]

DATADIR = r"C:\Burns_BIP_US_database2\Training set"

[22]: # Create training dataset
training_data = create_image_data(CATEGORIES, IMG_SIZE, DIM, DATADIR)

# Randomize the dataset
random.shuffle(training_data)

XF = []  # XF will contain training image data
yF = []  # yF will contain the corresponding image class

for features, label in training_data:
    XF.append(features)
    yF.append(label)

XF = np.array(XF).reshape(-1, IMG_SIZE, IMG_SIZE, DIM)

print("Shape XF", XF.shape)
print(yF)

# Uncomment if you want to save XF and yF for later use
'''
# Save it
pickle_out = open("XF.pickle","wb")
pickle.dump(XF, pickle_out)
pickle_out.close()

pickle_out = open("yF.pickle","wb")
pickle.dump(yF, pickle_out)
pickle_out.close()
'''

superficial
100%| | 28/28 [00:00<00:00, 1276.03it/s]
deep
100%| | 17/17 [00:00<00:00, 1311.20it/s]
full
100%| | 5/5 [00:00<00:00, 1687.85it/s]

Shape XF (50, 200, 200, 3)
[0, 0, 1, 1, 0, 0, 1, 2, 0, 1, 1, 0, 2, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0,
0, 0, 0, 0, 0, 0, 1, 0, 1, 2, 0, 1, 1, 0, 0, 2, 0, 1, 0, 2, 0, 0, 0, 1]

[22]: '\n# Save it\npickle_out = open("XF.pickle","wb")\npickle.dump(XF,
pickle_out)\npickle_out.close()\n\npickle_out =
open("yF.pickle","wb")\npickle.dump(yF, pickle_out)\npickle_out.close()\n'
[23]: # Uncomment if you want to load XF and yF again (only applicable if XF and yF were
# stored in the previous cell)
'''
pickle_in = open("XF.pickle","rb")
XF = pickle.load(pickle_in)

pickle_in = open("yF.pickle","rb")
yF = pickle.load(pickle_in)
'''

[23]: '\npickle_in = open("XF.pickle","rb")\nXF = pickle.load(pickle_in)\n\npickle_in
= open("yF.pickle","rb")\nyF = pickle.load(pickle_in)\n'
[24]: # Make model
XF = XF/255.0

model = Sequential()

model.add(Conv2D(64, (3, 3), input_shape=XF.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))

model.add(Dense(len(CATEGORIES)))
model.add(Activation('softmax'))

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
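sparse_categorical_crossentropy is used because yF holds integer class indices (0, 1, 2) rather than one-hot vectors. As a point of comparison only (not what this notebook does), the equivalent one-hot setup with categorical_crossentropy would look roughly like this sketch:

    from tensorflow.keras.utils import to_categorical

    yF_onehot = to_categorical(yF, num_classes=len(CATEGORIES))  # shape (N, 3)
    # model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # model.fit(XF, yF_onehot, ...)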
[25]: # Train model
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=10, verbose=1,
                        mode='auto', restore_best_weights=True)
hist = model.fit(XF, yF, batch_size=8, epochs=500, validation_split=0.4,
                 callbacks=[monitor])
Train on 30 samples, validate on 20 samples
Epoch 1/500  - 1s 41ms/sample - loss: 29.7459 - acc: 0.3333 - val_loss: 15.2719 - val_acc: 0.3000
Epoch 2/500  - 1s 36ms/sample - loss: 7.0136 - acc: 0.3667 - val_loss: 0.8187 - val_acc: 0.6000
Epoch 3/500  - 1s 33ms/sample - loss: 0.6950 - acc: 0.6333 - val_loss: 0.6637 - val_acc: 0.7000
Epoch 4/500  - 1s 32ms/sample - loss: 0.7267 - acc: 0.7333 - val_loss: 0.9744 - val_acc: 0.7500
Epoch 5/500  - 1s 32ms/sample - loss: 0.6386 - acc: 0.8000 - val_loss: 0.7851 - val_acc: 0.7000
Epoch 6/500  - 1s 34ms/sample - loss: 0.6299 - acc: 0.7667 - val_loss: 1.6513 - val_acc: 0.5500
Epoch 7/500  - 1s 33ms/sample - loss: 1.1364 - acc: 0.6000 - val_loss: 0.6369 - val_acc: 0.7000
Epoch 8/500  - 1s 32ms/sample - loss: 0.3996 - acc: 0.7667 - val_loss: 0.6525 - val_acc: 0.8500
Epoch 9/500  - 1s 32ms/sample - loss: 0.3286 - acc: 0.8667 - val_loss: 0.6387 - val_acc: 0.8000
Epoch 10/500 - 1s 35ms/sample - loss: 0.3279 - acc: 0.9000 - val_loss: 0.6138 - val_acc: 0.8000
Epoch 11/500 - 1s 33ms/sample - loss: 0.2950 - acc: 0.9000 - val_loss: 0.5616 - val_acc: 0.8500
Epoch 12/500 - 1s 34ms/sample - loss: 0.3073 - acc: 0.9000 - val_loss: 0.5232 - val_acc: 0.8000
Epoch 13/500 - 1s 34ms/sample - loss: 0.2975 - acc: 0.9000 - val_loss: 0.5602 - val_acc: 0.8500
Epoch 14/500 - 1s 32ms/sample - loss: 0.2583 - acc: 0.9000 - val_loss: 0.5613 - val_acc: 0.8000
Epoch 15/500 - 1s 32ms/sample - loss: 0.2373 - acc: 0.9000 - val_loss: 0.5505 - val_acc: 0.8500
Epoch 16/500 - 1s 33ms/sample - loss: 0.2417 - acc: 0.9000 - val_loss: 0.5070 - val_acc: 0.8000
Epoch 17/500 - 1s 32ms/sample - loss: 0.1980 - acc: 0.9000 - val_loss: 0.6738 - val_acc: 0.8500
Epoch 18/500 - 1s 34ms/sample - loss: 0.2184 - acc: 0.8667 - val_loss: 0.5049 - val_acc: 0.8000
Epoch 19/500 - 1s 33ms/sample - loss: 0.2387 - acc: 0.9000 - val_loss: 0.5636 - val_acc: 0.8000
Epoch 20/500 - 1s 34ms/sample - loss: 0.1580 - acc: 0.9000 - val_loss: 0.9510 - val_acc: 0.8500
Epoch 21/500 - 1s 34ms/sample - loss: 0.2011 - acc: 0.9333 - val_loss: 0.5379 - val_acc: 0.8000
Epoch 22/500 - 1s 34ms/sample - loss: 0.1358 - acc: 0.9667 - val_loss: 0.8479 - val_acc: 0.8500
Epoch 23/500 - 1s 34ms/sample - loss: 0.1242 - acc: 0.9667 - val_loss: 0.5569 - val_acc: 0.8000
Epoch 24/500 - 1s 33ms/sample - loss: 0.1051 - acc: 0.9667 - val_loss: 0.4309 - val_acc: 0.8500
Epoch 25/500 - 1s 35ms/sample - loss: 0.1422 - acc: 0.9333 - val_loss: 0.5512 - val_acc: 0.8000
Epoch 26/500 - 1s 34ms/sample - loss: 0.1627 - acc: 0.9000 - val_loss: 0.5190 - val_acc: 0.8000
Epoch 27/500 - 1s 34ms/sample - loss: 0.1488 - acc: 0.9667 - val_loss: 0.3746 - val_acc: 0.8000
Epoch 28/500 - 1s 32ms/sample - loss: 0.1157 - acc: 0.9667 - val_loss: 0.4263 - val_acc: 0.8000
Epoch 29/500 - 1s 34ms/sample - loss: 0.0744 - acc: 1.0000 - val_loss: 0.6152 - val_acc: 0.8500
Epoch 30/500 - 1s 32ms/sample - loss: 0.0620 - acc: 1.0000 - val_loss: 0.7550 - val_acc: 0.8500
Epoch 31/500 - 1s 35ms/sample - loss: 0.0438 - acc: 1.0000 - val_loss: 0.6932 - val_acc: 0.8000
Epoch 32/500 - 1s 34ms/sample - loss: 0.0292 - acc: 1.0000 - val_loss: 0.5281 - val_acc: 0.8000
Epoch 33/500 - 1s 32ms/sample - loss: 0.0205 - acc: 1.0000 - val_loss: 0.5451 - val_acc: 0.8000
Epoch 34/500 - 1s 33ms/sample - loss: 0.0202 - acc: 1.0000 - val_loss: 0.5262 - val_acc: 0.8000
Epoch 35/500 - 1s 32ms/sample - loss: 0.0179 - acc: 1.0000 - val_loss: 0.6169 - val_acc: 0.8000
Epoch 36/500 - 1s 34ms/sample - loss: 0.0123 - acc: 1.0000 - val_loss: 0.4354 - val_acc: 0.8000
Epoch 37/500 - Restoring model weights from the end of the best epoch.
             - 1s 38ms/sample - loss: 0.0127 - acc: 1.0000 - val_loss: 0.4820 - val_acc: 0.8000
Epoch 00037: early stopping
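With restore_best_weights=True, training ends with the weights from the epoch that had the lowest validation loss (epoch 27 here, val_loss 0.3746), not the final epoch. A quick sketch to recover that epoch index from the history object:

    best_epoch = int(np.argmin(hist.history['val_loss'])) + 1   # 1-based epoch number
    print(best_epoch, min(hist.history['val_loss']))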

[26]: model.summary()

Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_2 (Conv2D) (None, 198, 198, 64) 1792
_________________________________________________________________
activation_3 (Activation) (None, 198, 198, 64) 0
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 99, 99, 64) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 97, 97, 64) 36928

7
_________________________________________________________________
activation_4 (Activation) (None, 97, 97, 64) 0
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 48, 48, 64) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 147456) 0
_________________________________________________________________
dense_2 (Dense) (None, 64) 9437248
_________________________________________________________________
dense_3 (Dense) (None, 3) 195
_________________________________________________________________
activation_5 (Activation) (None, 3) 0
=================================================================
Total params: 9,476,163
Trainable params: 9,476,163
Non-trainable params: 0
_________________________________________________________________

[27]: model.save('burn.model')
[28]: model = tf.keras.models.load_model("burn.model")
[29]: # Visualization
%matplotlib inline

train_loss = hist.history['loss']
val_loss = hist.history['val_loss']
train_acc = hist.history['acc']
val_acc = hist.history['val_acc']

plt.figure(1, figsize=(7,5))
plt.plot(train_loss)
plt.plot(val_loss)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('train_loss vs val_loss')
plt.legend(['train_loss', 'val_loss'])

plt.figure(2, figsize=(7,5))
plt.plot(train_acc)
plt.plot(val_acc)
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('train_acc vs val_acc')
plt.legend(['train_acc', 'val_acc'])
[29]: <matplotlib.legend.Legend at 0x22e83fba278>

[Figure 1: train_loss vs val_loss]
[Figure 2: train_acc vs val_acc]
[30]: # Test
#model = tf.keras.models.load_model("burn.model")
filepath = r'C:\Burns_BIP_US_database\Burns_BIP_US_database\Testing set\24.jpg'
test_img = prepare(filepath, IMG_SIZE, DIM)
# Note: XF was scaled by 1/255 before training, so for consistency the test image
# should arguably be scaled the same way (test_img/255.0) before predicting.
prediction = model.predict(test_img)
pred_img_class = np.argmax(prediction[0]) + 1  # one is added because class indices start at 0

print("Class: ", pred_img_class, CATEGORIES[int(pred_img_class - 1)])

Class:  2 deep

[31]: DATADIR = "C:\Burns_BIP_US_database2\Test"

# Create testing dataset


testing_data = create_image_data(CATEGORIES, IMG_SIZE, DIM, DATADIR)

# Randomize the dataset


random.shuffle(testing_data)

10
Xt = []
yt = []

for features,label in testing_data:


Xt.append(features)
yt.append(label)

# print(X[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))

Xt = np.array(Xt).reshape(-1, IMG_SIZE, IMG_SIZE, DIM)


print("Shape Xt", Xt.shape)

print(yt)

# Uncomment if want to save Xt and yt


'''
# Save it
pickle_out = open("Xt.pickle","wb")
pickle.dump(Xt, pickle_out)
pickle_out.close()

pickle_out = open("yF.pickle","wb")
pickle.dump(yt, pickle_out)
pickle_out.close()

'''

superficial
100%| | 28/28 [00:00<00:00, 1167.08it/s]
deep
100%| | 17/17 [00:00<00:00, 1218.11it/s]
full
100%| | 5/5 [00:00<00:00, 1252.18it/s]

Shape Xt (50, 200, 200, 3)
[1, 0, 1, 1, 1, 1, 2, 2, 0, 0, 0, 0, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 1, 1, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0]

[31]: '\n# Save it\npickle_out = open("Xt.pickle","wb")\npickle.dump(Xt,
pickle_out)\npickle_out.close()\n\npickle_out =
open("yt.pickle","wb")\npickle.dump(yt, pickle_out)\npickle_out.close()\n\n'
[32]: # Prediction
Xt = Xt/255.0
prediction = model.predict(Xt)
[33]: prediction
[33]: array([[6.9177598e-02, 9.1661644e-01, 1.4206015e-02],
[9.9750394e-01, 2.4893235e-03, 6.8358472e-06],
[5.6246854e-02, 9.3490022e-01, 8.8529726e-03],
[3.6748189e-02, 9.5945191e-01, 3.7999116e-03],
[2.4935640e-02, 6.1624438e-01, 3.5881993e-01],
[2.4122265e-03, 8.4805250e-01, 1.4953528e-01],
[2.8117441e-02, 5.4677016e-01, 4.2511237e-01],
[9.0864080e-01, 5.3545080e-02, 3.7814125e-02],
[8.4944558e-01, 1.3588981e-01, 1.4664594e-02],
[9.9788862e-01, 2.1089872e-03, 2.3801765e-06],
[9.9983132e-01, 1.6861479e-04, 4.4811838e-10],
[9.9999833e-01, 1.6225830e-06, 4.1408798e-15],
[8.6725187e-03, 9.8945707e-01, 1.8704239e-03],
[1.6471755e-02, 1.2970529e-01, 8.5382295e-01],
[9.9956626e-01, 4.3370176e-04, 4.1751758e-11],
[4.8086150e-03, 5.1621491e-01, 4.7897646e-01],
[9.9995089e-01, 4.4393797e-05, 4.7545286e-06],
[9.9768198e-01, 2.3048581e-03, 1.3260562e-05],
[3.7941253e-01, 5.9800726e-01, 2.2580160e-02],
[1.0000000e+00, 2.0629172e-15, 5.6530202e-21],
[9.9988103e-01, 1.1899669e-04, 2.6429609e-12],
[5.8511365e-02, 9.2762667e-01, 1.3861956e-02],
[9.9996841e-01, 1.2329773e-05, 1.9345525e-05],
[9.9999821e-01, 1.8217075e-06, 2.5016759e-13],
[1.0000000e+00, 6.2744276e-09, 6.7194739e-12],
[9.9999893e-01, 1.0925392e-06, 1.5842545e-12],
[9.9897909e-01, 7.0252945e-04, 3.1838933e-04],
[3.4734741e-02, 8.8429219e-01, 8.0973119e-02],
[1.3740325e-01, 8.1055164e-01, 5.2045096e-02],
[9.9999952e-01, 4.2803453e-07, 4.4157345e-14],
[3.3266719e-02, 9.6620017e-01, 5.3305435e-04],
[9.9999988e-01, 9.4944482e-08, 3.8909452e-11],
[9.3997318e-01, 2.3687160e-02, 3.6339749e-02],
[9.9964213e-01, 3.5643266e-04, 1.3845670e-06],
[1.1273263e-02, 1.9017610e-01, 7.9855061e-01],
[9.8947537e-01, 9.9517526e-03, 5.7291152e-04],
[1.0000000e+00, 1.0749061e-11, 1.0693595e-20],
[1.0000000e+00, 1.0757337e-09, 1.5974583e-16],
[2.6470406e-02, 9.0730214e-01, 6.6227488e-02],
[9.9842381e-01, 1.5758446e-03, 2.9804025e-07],
[4.2742612e-03, 7.2721851e-01, 2.6850730e-01],
[9.9974221e-01, 2.5745321e-04, 3.6373274e-07],
[3.9963532e-02, 9.5803803e-01, 1.9984038e-03],
[9.9592823e-01, 4.0436038e-03, 2.8134687e-05],
[3.4613766e-02, 5.2277368e-01, 4.4261265e-01],
[5.1432151e-02, 8.3336318e-01, 1.1520473e-01],
[9.9562007e-01, 4.3787966e-03, 1.0616234e-06],
[1.7357057e-02, 7.1994209e-01, 2.6270083e-01],
[7.3362082e-02, 6.5356177e-01, 2.7307615e-01],
[1.0000000e+00, 2.4787825e-09, 4.2579191e-14]], dtype=float32)
[34]: pred = np.argmax(prediction, axis = 1)
[35]: pred
[35]: array([1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 2, 0, 1, 0, 0, 1, 0, 0, 1,
0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 0, 1, 0, 1, 0,
1, 1, 0, 1, 1, 0], dtype=int64)
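The integer predictions can be mapped back to readable category names; a small sketch using the CATEGORIES list defined earlier:

    pred_labels = [CATEGORIES[i] for i in pred]   # e.g. ['deep', 'superficial', ...]
    print(pred_labels[:5])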
[36]: # Create confusion matrix (rows = true class, columns = predicted class)
conf_mat = confusion_matrix(yt, pred)
print(conf_mat)

[[27 1 0]
[ 0 17 0]
[ 1 2 2]]

[37]: # Pandas view of confusion matrix (rows = true class, columns = predicted class)
data = {CATEGORIES[0]: conf_mat[:,0],
        CATEGORIES[1]: conf_mat[:,1],
        CATEGORIES[2]: conf_mat[:,2]}

df = pd.DataFrame(data, index = CATEGORIES)
print(df)

             superficial  deep  full
superficial           27     1     0
deep                   0    17     0
full                   1     2     2
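If per-class rates are of more interest than raw counts, the same matrix can be normalized by its row sums (a small sketch, not part of the original notebook):

    row_sums = conf_mat.sum(axis=1, keepdims=True)
    print(np.round(conf_mat / row_sums, 2))   # fraction of each true class assigned to each predicted class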

[46]: # Test accuracy
num = np.trace(conf_mat)  # np.trace --> sums the diagonal (correctly classified) values
den = np.sum(conf_mat)    # total number of test instances
test_acc = num/den

print("Correct instances: ", num)
print("All instances: ", den)
print("Test accuracy: ", test_acc*100)

Correct instances:  46
All instances:  50
Test accuracy:  92.0

[38]: # Precision, recall, F1 Score
print("Precision: %0.3f" % precision_score(np.array(yt), pred, average='micro'))
print("Recall: %0.3f" % recall_score(yt, pred, average='micro'))
print("F1 Score: %0.3f" % f1_score(yt, pred, average='micro'))

Precision: 0.920
Recall: 0.920
F1 Score: 0.920
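With average='micro' the metrics are computed over all predictions pooled together, so for single-label multiclass data precision, recall and F1 all equal the overall accuracy (0.920 here). For per-class figures, a sketch using scikit-learn's classification_report:

    from sklearn.metrics import classification_report
    print(classification_report(yt, pred, target_names=CATEGORIES))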

[ ]:

