Skip to content
Snippets Groups Projects
Commit 2a95fa46 authored by Paul G's avatar Paul G
Browse files

changed model training

parent dae294d8
No related branches found
No related tags found
No related merge requests found
%% Cell type:markdown id: tags:
# Quelle
https://github.com/bnsreenu/python_for_microscopists/blob/master/260_image_anomaly_detection_using_autoencoders/260_image_anomaly_detection_using_autoencoders.py
``Infos``\
Detecting anomaly images using AutoEncoders. (Sorting an entire image as either normal or anomaly)\
Here, we use both the reconstruction error and also the kernel density estimation based on the vectors in the latent space.
We will consider the bottleneck layer output from our autoencoder as the latent space.\
This code uses the malarial data set but it can be easily applied to any application.
Data from: https://data.lhncbc.nlm.nih.gov/public/Malaria/cell_images.zip
%% Cell type:code id: tags:
``` python
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import random
import os
import pandas as pd
```
%% Cell type:code id: tags:
``` python
#Size of our input images
SIZE = 128
#############################################################################
#Define generators for training, validation and also anomaly data.
batch_size = 64
datagen = ImageDataGenerator(rescale=1./255)

def _flow_from(directory):
    """Build a reconstruction-training generator over *directory*.

    ``class_mode='input'`` makes the generator yield (image, image)
    pairs, i.e. the target equals the input, which is exactly what an
    autoencoder needs.
    """
    return datagen.flow_from_directory(
        directory,
        target_size=(SIZE, SIZE),
        batch_size=batch_size,
        class_mode='input',
    )

# Normal (uninfected) cells for training/validation; parasitized cells
# are the anomalies the model never sees during training.
train_generator = _flow_from('data/cell_images/uninfected_train/')
validation_generator = _flow_from('data/cell_images/uninfected_test/')
anomaly_generator = _flow_from('data/cell_images/parasitized/')
```
%% Output
Found 2000 images belonging to 1 classes.
Found 500 images belonging to 1 classes.
Found 2000 images belonging to 1 classes.
%% Cell type:code id: tags:
``` python
# Define the autoencoder.
# Keep the bottleneck as small as possible so density estimation on the
# latent vectors stays tractable and thresholds are easy to pick.
model = Sequential()

# Encoder: three conv + max-pool stages, 128x128x3 down to a 16x16x16
# bottleneck.
for stage, n_filters in enumerate((64, 32, 16)):
    if stage == 0:
        model.add(Conv2D(n_filters, (3, 3), activation='relu',
                         padding='same', input_shape=(SIZE, SIZE, 3)))
    else:
        model.add(Conv2D(n_filters, (3, 3), activation='relu',
                         padding='same'))
    model.add(MaxPooling2D((2, 2), padding='same'))

# Decoder: mirror of the encoder, upsampling back to 128x128.
for n_filters in (16, 32, 64):
    model.add(Conv2D(n_filters, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))

# Final projection back to 3 channels; sigmoid keeps outputs in [0, 1],
# matching the 1/255-rescaled inputs.
model.add(Conv2D(3, (3, 3), activation='sigmoid', padding='same'))

model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mse'])
model.summary()
```
%% Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 128, 128, 64) 1792
max_pooling2d (MaxPooling2 (None, 64, 64, 64) 0
D)
conv2d_1 (Conv2D) (None, 64, 64, 32) 18464
max_pooling2d_1 (MaxPoolin (None, 32, 32, 32) 0
g2D)
conv2d_2 (Conv2D) (None, 32, 32, 16) 4624
max_pooling2d_2 (MaxPoolin (None, 16, 16, 16) 0
g2D)
conv2d_3 (Conv2D) (None, 16, 16, 16) 2320
up_sampling2d (UpSampling2 (None, 32, 32, 16) 0
D)
conv2d_4 (Conv2D) (None, 32, 32, 32) 4640
up_sampling2d_1 (UpSamplin (None, 64, 64, 32) 0
g2D)
conv2d_5 (Conv2D) (None, 64, 64, 64) 18496
up_sampling2d_2 (UpSamplin (None, 128, 128, 64) 0
g2D)
conv2d_6 (Conv2D) (None, 128, 128, 3) 1731
=================================================================
Total params: 52067 (203.39 KB)
Trainable params: 52067 (203.39 KB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
%% Cell type:code id: tags:
``` python
#Fit the model.
# NOTE(review): steps_per_epoch is computed from 500 although the
# training generator reports 2000 images, so each epoch only visits a
# small subset (5 batches) of the training data — presumably intentional
# to shorten epochs; confirm. `shuffle=True` was removed: Keras ignores
# the `shuffle` argument when the input is a generator, so it had no
# effect here.
history = model.fit(
    train_generator,
    steps_per_epoch=500 // batch_size - 2,
    epochs=200,
    validation_data=validation_generator,
    validation_steps=75 // batch_size,
)

# Plot the training and validation loss at each epoch.
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'y', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
```
%% Output
Epoch 1/200
5/5 [==============================] - 28s 5s/step - loss: 0.0921 - mse: 0.0921 - val_loss: 0.0807 - val_mse: 0.0807
Epoch 2/200
5/5 [==============================] - 30s 6s/step - loss: 0.0741 - mse: 0.0741 - val_loss: 0.0575 - val_mse: 0.0575
Epoch 3/200
5/5 [==============================] - 27s 5s/step - loss: 0.0520 - mse: 0.0520 - val_loss: 0.0420 - val_mse: 0.0420
Epoch 4/200
5/5 [==============================] - 26s 5s/step - loss: 0.0348 - mse: 0.0348 - val_loss: 0.0249 - val_mse: 0.0249
Epoch 5/200
5/5 [==============================] - 28s 6s/step - loss: 0.0229 - mse: 0.0229 - val_loss: 0.0183 - val_mse: 0.0183
Epoch 6/200
5/5 [==============================] - 30s 6s/step - loss: 0.0171 - mse: 0.0171 - val_loss: 0.0165 - val_mse: 0.0165
Epoch 7/200
5/5 [==============================] - 31s 6s/step - loss: 0.0158 - mse: 0.0158 - val_loss: 0.0144 - val_mse: 0.0144
Epoch 8/200
5/5 [==============================] - 33s 7s/step - loss: 0.0143 - mse: 0.0143 - val_loss: 0.0136 - val_mse: 0.0136
Epoch 9/200
5/5 [==============================] - 39s 8s/step - loss: 0.0134 - mse: 0.0134 - val_loss: 0.0135 - val_mse: 0.0135
Epoch 10/200
5/5 [==============================] - 37s 7s/step - loss: 0.0127 - mse: 0.0127 - val_loss: 0.0125 - val_mse: 0.0125
Epoch 11/200
5/5 [==============================] - 26s 5s/step - loss: 0.0123 - mse: 0.0123 - val_loss: 0.0119 - val_mse: 0.0119
Epoch 12/200
5/5 [==============================] - 28s 6s/step - loss: 0.0117 - mse: 0.0117 - val_loss: 0.0114 - val_mse: 0.0114
Epoch 13/200
5/5 [==============================] - 26s 5s/step - loss: 0.0110 - mse: 0.0110 - val_loss: 0.0110 - val_mse: 0.0110
Epoch 14/200
5/5 [==============================] - 22s 4s/step - loss: 0.0109 - mse: 0.0109 - val_loss: 0.0121 - val_mse: 0.0121
Epoch 15/200
5/5 [==============================] - 25s 5s/step - loss: 0.0107 - mse: 0.0107 - val_loss: 0.0103 - val_mse: 0.0103
Epoch 16/200
5/5 [==============================] - 25s 5s/step - loss: 0.0105 - mse: 0.0105 - val_loss: 0.0104 - val_mse: 0.0104
Epoch 17/200
5/5 [==============================] - 21s 4s/step - loss: 0.0101 - mse: 0.0101 - val_loss: 0.0106 - val_mse: 0.0106
Epoch 18/200
5/5 [==============================] - 24s 5s/step - loss: 0.0103 - mse: 0.0103 - val_loss: 0.0101 - val_mse: 0.0101
Epoch 19/200
5/5 [==============================] - 24s 5s/step - loss: 0.0100 - mse: 0.0100 - val_loss: 0.0100 - val_mse: 0.0100
Epoch 20/200
5/5 [==============================] - 25s 5s/step - loss: 0.0097 - mse: 0.0097 - val_loss: 0.0100 - val_mse: 0.0100
Epoch 21/200
5/5 [==============================] - 25s 5s/step - loss: 0.0097 - mse: 0.0097 - val_loss: 0.0091 - val_mse: 0.0091
Epoch 22/200
5/5 [==============================] - 21s 4s/step - loss: 0.0094 - mse: 0.0094 - val_loss: 0.0095 - val_mse: 0.0095
Epoch 23/200
5/5 [==============================] - 21s 5s/step - loss: 0.0093 - mse: 0.0093 - val_loss: 0.0099 - val_mse: 0.0099
Epoch 24/200
5/5 [==============================] - 21s 4s/step - loss: 0.0094 - mse: 0.0094 - val_loss: 0.0091 - val_mse: 0.0091
Epoch 25/200
5/5 [==============================] - 24s 5s/step - loss: 0.0091 - mse: 0.0091 - val_loss: 0.0090 - val_mse: 0.0090
Epoch 26/200
5/5 [==============================] - 27s 5s/step - loss: 0.0088 - mse: 0.0088 - val_loss: 0.0087 - val_mse: 0.0087
Epoch 27/200
5/5 [==============================] - 26s 5s/step - loss: 0.0087 - mse: 0.0087 - val_loss: 0.0087 - val_mse: 0.0087
Epoch 28/200
5/5 [==============================] - 29s 6s/step - loss: 0.0088 - mse: 0.0088 - val_loss: 0.0081 - val_mse: 0.0081
Epoch 29/200
5/5 [==============================] - 31s 6s/step - loss: 0.0084 - mse: 0.0084 - val_loss: 0.0083 - val_mse: 0.0083
Epoch 30/200
5/5 [==============================] - 26s 5s/step - loss: 0.0082 - mse: 0.0082 - val_loss: 0.0081 - val_mse: 0.0081
Epoch 31/200
5/5 [==============================] - 24s 5s/step - loss: 0.0080 - mse: 0.0080 - val_loss: 0.0078 - val_mse: 0.0078
Epoch 32/200
5/5 [==============================] - 24s 5s/step - loss: 0.0077 - mse: 0.0077 - val_loss: 0.0077 - val_mse: 0.0077
Epoch 33/200
5/5 [==============================] - 24s 5s/step - loss: 0.0075 - mse: 0.0075 - val_loss: 0.0070 - val_mse: 0.0070
Epoch 34/200
5/5 [==============================] - 24s 5s/step - loss: 0.0074 - mse: 0.0074 - val_loss: 0.0074 - val_mse: 0.0074
Epoch 35/200
5/5 [==============================] - 24s 5s/step - loss: 0.0071 - mse: 0.0071 - val_loss: 0.0073 - val_mse: 0.0073
Epoch 36/200
5/5 [==============================] - 24s 5s/step - loss: 0.0072 - mse: 0.0072 - val_loss: 0.0069 - val_mse: 0.0069
Epoch 37/200
5/5 [==============================] - 24s 5s/step - loss: 0.0072 - mse: 0.0072 - val_loss: 0.0071 - val_mse: 0.0071
Epoch 38/200
5/5 [==============================] - 24s 5s/step - loss: 0.0072 - mse: 0.0072 - val_loss: 0.0069 - val_mse: 0.0069
Epoch 39/200
5/5 [==============================] - 21s 4s/step - loss: 0.0069 - mse: 0.0069 - val_loss: 0.0069 - val_mse: 0.0069
Epoch 40/200
5/5 [==============================] - 21s 4s/step - loss: 0.0068 - mse: 0.0068 - val_loss: 0.0068 - val_mse: 0.0068
Epoch 41/200
5/5 [==============================] - 24s 5s/step - loss: 0.0067 - mse: 0.0067 - val_loss: 0.0067 - val_mse: 0.0067
Epoch 42/200
5/5 [==============================] - 24s 5s/step - loss: 0.0066 - mse: 0.0066 - val_loss: 0.0065 - val_mse: 0.0065
Epoch 43/200
5/5 [==============================] - 25s 5s/step - loss: 0.0064 - mse: 0.0064 - val_loss: 0.0062 - val_mse: 0.0062
Epoch 44/200
5/5 [==============================] - 26s 5s/step - loss: 0.0064 - mse: 0.0064 - val_loss: 0.0063 - val_mse: 0.0063
Epoch 45/200
5/5 [==============================] - 28s 6s/step - loss: 0.0063 - mse: 0.0063 - val_loss: 0.0063 - val_mse: 0.0063
Epoch 46/200
5/5 [==============================] - 25s 5s/step - loss: 0.0063 - mse: 0.0063 - val_loss: 0.0063 - val_mse: 0.0063
Epoch 47/200
5/5 [==============================] - 30s 6s/step - loss: 0.0065 - mse: 0.0065 - val_loss: 0.0063 - val_mse: 0.0063
Epoch 48/200
5/5 [==============================] - 22s 4s/step - loss: 0.0063 - mse: 0.0063 - val_loss: 0.0062 - val_mse: 0.0062
Epoch 49/200
5/5 [==============================] - 25s 5s/step - loss: 0.0063 - mse: 0.0063 - val_loss: 0.0063 - val_mse: 0.0063
Epoch 50/200
5/5 [==============================] - 26s 5s/step - loss: 0.0061 - mse: 0.0061 - val_loss: 0.0059 - val_mse: 0.0059
Epoch 51/200
5/5 [==============================] - 24s 5s/step - loss: 0.0061 - mse: 0.0061 - val_loss: 0.0060 - val_mse: 0.0060
Epoch 52/200
5/5 [==============================] - 24s 5s/step - loss: 0.0059 - mse: 0.0059 - val_loss: 0.0058 - val_mse: 0.0058
Epoch 53/200
5/5 [==============================] - 25s 5s/step - loss: 0.0060 - mse: 0.0060 - val_loss: 0.0064 - val_mse: 0.0064
Epoch 54/200
5/5 [==============================] - 26s 5s/step - loss: 0.0059 - mse: 0.0059 - val_loss: 0.0058 - val_mse: 0.0058
Epoch 55/200
5/5 [==============================] - 30s 6s/step - loss: 0.0062 - mse: 0.0062 - val_loss: 0.0057 - val_mse: 0.0057
Epoch 56/200
5/5 [==============================] - 26s 5s/step - loss: 0.0061 - mse: 0.0061 - val_loss: 0.0062 - val_mse: 0.0062
Epoch 57/200
5/5 [==============================] - 35s 7s/step - loss: 0.0061 - mse: 0.0061 - val_loss: 0.0061 - val_mse: 0.0061
Epoch 58/200
5/5 [==============================] - 35s 7s/step - loss: 0.0059 - mse: 0.0059 - val_loss: 0.0062 - val_mse: 0.0062
Epoch 59/200
5/5 [==============================] - 37s 8s/step - loss: 0.0058 - mse: 0.0058 - val_loss: 0.0059 - val_mse: 0.0059
Epoch 60/200
5/5 [==============================] - 44s 9s/step - loss: 0.0058 - mse: 0.0058 - val_loss: 0.0061 - val_mse: 0.0061
Epoch 61/200
5/5 [==============================] - 31s 6s/step - loss: 0.0057 - mse: 0.0057 - val_loss: 0.0057 - val_mse: 0.0057
Epoch 62/200
5/5 [==============================] - 39s 7s/step - loss: 0.0057 - mse: 0.0057 - val_loss: 0.0059 - val_mse: 0.0059
Epoch 63/200
5/5 [==============================] - 49s 10s/step - loss: 0.0057 - mse: 0.0057 - val_loss: 0.0058 - val_mse: 0.0058
Epoch 64/200
5/5 [==============================] - 45s 9s/step - loss: 0.0056 - mse: 0.0056 - val_loss: 0.0059 - val_mse: 0.0059
Epoch 65/200
5/5 [==============================] - 53s 11s/step - loss: 0.0057 - mse: 0.0057 - val_loss: 0.0054 - val_mse: 0.0054
Epoch 66/200
5/5 [==============================] - 46s 10s/step - loss: 0.0054 - mse: 0.0054 - val_loss: 0.0058 - val_mse: 0.0058
Epoch 67/200
5/5 [==============================] - 53s 11s/step - loss: 0.0056 - mse: 0.0056 - val_loss: 0.0056 - val_mse: 0.0056
Epoch 68/200
5/5 [==============================] - 37s 7s/step - loss: 0.0057 - mse: 0.0057 - val_loss: 0.0055 - val_mse: 0.0055
Epoch 69/200
5/5 [==============================] - 38s 8s/step - loss: 0.0055 - mse: 0.0055 - val_loss: 0.0058 - val_mse: 0.0058
Epoch 70/200
5/5 [==============================] - 36s 7s/step - loss: 0.0056 - mse: 0.0056 - val_loss: 0.0054 - val_mse: 0.0054
Epoch 71/200
5/5 [==============================] - 31s 6s/step - loss: 0.0055 - mse: 0.0055 - val_loss: 0.0053 - val_mse: 0.0053
Epoch 72/200
5/5 [==============================] - 29s 6s/step - loss: 0.0055 - mse: 0.0055 - val_loss: 0.0057 - val_mse: 0.0057
Epoch 73/200
5/5 [==============================] - 27s 5s/step - loss: 0.0055 - mse: 0.0055 - val_loss: 0.0055 - val_mse: 0.0055
Epoch 74/200
5/5 [==============================] - 26s 5s/step - loss: 0.0054 - mse: 0.0054 - val_loss: 0.0056 - val_mse: 0.0056
Epoch 75/200
5/5 [==============================] - 26s 5s/step - loss: 0.0054 - mse: 0.0054 - val_loss: 0.0052 - val_mse: 0.0052
Epoch 76/200
5/5 [==============================] - 27s 5s/step - loss: 0.0054 - mse: 0.0054 - val_loss: 0.0057 - val_mse: 0.0057
Epoch 77/200
5/5 [==============================] - 26s 5s/step - loss: 0.0054 - mse: 0.0054 - val_loss: 0.0054 - val_mse: 0.0054
Epoch 78/200
5/5 [==============================] - 28s 6s/step - loss: 0.0054 - mse: 0.0054 - val_loss: 0.0051 - val_mse: 0.0051
Epoch 79/200
5/5 [==============================] - 31s 6s/step - loss: 0.0054 - mse: 0.0054 - val_loss: 0.0052 - val_mse: 0.0052
Epoch 80/200
5/5 [==============================] - 33s 7s/step - loss: 0.0054 - mse: 0.0054 - val_loss: 0.0052 - val_mse: 0.0052
Epoch 81/200
5/5 [==============================] - 34s 7s/step - loss: 0.0053 - mse: 0.0053 - val_loss: 0.0058 - val_mse: 0.0058
Epoch 82/200
5/5 [==============================] - 28s 5s/step - loss: 0.0055 - mse: 0.0055 - val_loss: 0.0051 - val_mse: 0.0051
Epoch 83/200
5/5 [==============================] - 26s 5s/step - loss: 0.0053 - mse: 0.0053 - val_loss: 0.0053 - val_mse: 0.0053
Epoch 84/200
5/5 [==============================] - 27s 5s/step - loss: 0.0053 - mse: 0.0053 - val_loss: 0.0051 - val_mse: 0.0051
Epoch 85/200
5/5 [==============================] - 29s 5s/step - loss: 0.0051 - mse: 0.0051 - val_loss: 0.0051 - val_mse: 0.0051
Epoch 86/200
5/5 [==============================] - 26s 5s/step - loss: 0.0052 - mse: 0.0052 - val_loss: 0.0051 - val_mse: 0.0051
Epoch 87/200
5/5 [==============================] - 27s 5s/step - loss: 0.0052 - mse: 0.0052 - val_loss: 0.0052 - val_mse: 0.0052
Epoch 88/200
5/5 [==============================] - 27s 5s/step - loss: 0.0051 - mse: 0.0051 - val_loss: 0.0052 - val_mse: 0.0052
Epoch 89/200
5/5 [==============================] - 26s 5s/step - loss: 0.0050 - mse: 0.0050 - val_loss: 0.0049 - val_mse: 0.0049
Epoch 90/200
5/5 [==============================] - 26s 5s/step - loss: 0.0051 - mse: 0.0051 - val_loss: 0.0052 - val_mse: 0.0052
Epoch 91/200
5/5 [==============================] - 23s 4s/step - loss: 0.0051 - mse: 0.0051 - val_loss: 0.0053 - val_mse: 0.0053
Epoch 92/200
5/5 [==============================] - 23s 4s/step - loss: 0.0052 - mse: 0.0052 - val_loss: 0.0055 - val_mse: 0.0055
Epoch 93/200
5/5 [==============================] - 26s 5s/step - loss: 0.0052 - mse: 0.0052 - val_loss: 0.0050 - val_mse: 0.0050
Epoch 94/200
5/5 [==============================] - 23s 5s/step - loss: 0.0051 - mse: 0.0051 - val_loss: 0.0050 - val_mse: 0.0050
Epoch 95/200
5/5 [==============================] - 23s 4s/step - loss: 0.0050 - mse: 0.0050 - val_loss: 0.0050 - val_mse: 0.0050
Epoch 96/200
5/5 [==============================] - 26s 5s/step - loss: 0.0049 - mse: 0.0049 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 97/200
5/5 [==============================] - 29s 6s/step - loss: 0.0050 - mse: 0.0050 - val_loss: 0.0052 - val_mse: 0.0052
Epoch 98/200
5/5 [==============================] - 31s 6s/step - loss: 0.0049 - mse: 0.0049 - val_loss: 0.0051 - val_mse: 0.0051
Epoch 99/200
5/5 [==============================] - 29s 6s/step - loss: 0.0049 - mse: 0.0049 - val_loss: 0.0050 - val_mse: 0.0050
Epoch 100/200
5/5 [==============================] - 29s 6s/step - loss: 0.0050 - mse: 0.0050 - val_loss: 0.0050 - val_mse: 0.0050
Epoch 101/200
5/5 [==============================] - 23s 4s/step - loss: 0.0050 - mse: 0.0050 - val_loss: 0.0050 - val_mse: 0.0050
Epoch 102/200
5/5 [==============================] - 25s 5s/step - loss: 0.0050 - mse: 0.0050 - val_loss: 0.0052 - val_mse: 0.0052
Epoch 103/200
5/5 [==============================] - 24s 5s/step - loss: 0.0050 - mse: 0.0050 - val_loss: 0.0050 - val_mse: 0.0050
Epoch 104/200
5/5 [==============================] - 25s 5s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 105/200
5/5 [==============================] - 27s 6s/step - loss: 0.0049 - mse: 0.0049 - val_loss: 0.0050 - val_mse: 0.0050
Epoch 106/200
5/5 [==============================] - 24s 5s/step - loss: 0.0049 - mse: 0.0049 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 107/200
5/5 [==============================] - 25s 5s/step - loss: 0.0049 - mse: 0.0049 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 108/200
5/5 [==============================] - 21s 4s/step - loss: 0.0049 - mse: 0.0049 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 109/200
5/5 [==============================] - 25s 5s/step - loss: 0.0051 - mse: 0.0051 - val_loss: 0.0053 - val_mse: 0.0053
Epoch 110/200
5/5 [==============================] - 25s 5s/step - loss: 0.0050 - mse: 0.0050 - val_loss: 0.0050 - val_mse: 0.0050
Epoch 111/200
5/5 [==============================] - 25s 5s/step - loss: 0.0049 - mse: 0.0049 - val_loss: 0.0050 - val_mse: 0.0050
Epoch 112/200
5/5 [==============================] - 26s 5s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0047 - val_mse: 0.0047
Epoch 113/200
5/5 [==============================] - 26s 5s/step - loss: 0.0049 - mse: 0.0049 - val_loss: 0.0046 - val_mse: 0.0046
Epoch 114/200
5/5 [==============================] - 25s 5s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0050 - val_mse: 0.0050
Epoch 115/200
5/5 [==============================] - 28s 6s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0050 - val_mse: 0.0050
Epoch 116/200
5/5 [==============================] - 29s 6s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0049 - val_mse: 0.0049
Epoch 117/200
5/5 [==============================] - 30s 6s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0047 - val_mse: 0.0047
Epoch 118/200
5/5 [==============================] - 30s 6s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 119/200
5/5 [==============================] - 22s 4s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 120/200
5/5 [==============================] - 24s 6s/step - loss: 0.0047 - mse: 0.0047 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 121/200
5/5 [==============================] - 25s 5s/step - loss: 0.0047 - mse: 0.0047 - val_loss: 0.0047 - val_mse: 0.0047
Epoch 122/200
5/5 [==============================] - 25s 5s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 123/200
5/5 [==============================] - 25s 5s/step - loss: 0.0049 - mse: 0.0049 - val_loss: 0.0049 - val_mse: 0.0049
Epoch 124/200
5/5 [==============================] - 26s 5s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0047 - val_mse: 0.0047
Epoch 125/200
5/5 [==============================] - 25s 5s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0047 - val_mse: 0.0047
Epoch 126/200
5/5 [==============================] - 25s 5s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 127/200
5/5 [==============================] - 21s 4s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0047 - val_mse: 0.0047
Epoch 128/200
5/5 [==============================] - 25s 5s/step - loss: 0.0047 - mse: 0.0047 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 129/200
5/5 [==============================] - 25s 5s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 130/200
5/5 [==============================] - 26s 5s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 131/200
5/5 [==============================] - 25s 5s/step - loss: 0.0047 - mse: 0.0047 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 132/200
5/5 [==============================] - 25s 5s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0046 - val_mse: 0.0046
Epoch 133/200
5/5 [==============================] - 27s 5s/step - loss: 0.0047 - mse: 0.0047 - val_loss: 0.0046 - val_mse: 0.0046
Epoch 134/200
5/5 [==============================] - 28s 6s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0046 - val_mse: 0.0046
Epoch 135/200
5/5 [==============================] - 30s 6s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 136/200
5/5 [==============================] - 31s 6s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0047 - val_mse: 0.0047
Epoch 137/200
5/5 [==============================] - 28s 5s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0046 - val_mse: 0.0046
Epoch 138/200
5/5 [==============================] - 26s 5s/step - loss: 0.0047 - mse: 0.0047 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 139/200
5/5 [==============================] - 25s 5s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 140/200
5/5 [==============================] - 25s 5s/step - loss: 0.0048 - mse: 0.0048 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 141/200
5/5 [==============================] - 26s 5s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 142/200
5/5 [==============================] - 26s 5s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0047 - val_mse: 0.0047
Epoch 143/200
5/5 [==============================] - 22s 5s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0046 - val_mse: 0.0046
Epoch 144/200
5/5 [==============================] - 22s 5s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0046 - val_mse: 0.0046
Epoch 145/200
5/5 [==============================] - 26s 5s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 146/200
5/5 [==============================] - 25s 5s/step - loss: 0.0047 - mse: 0.0047 - val_loss: 0.0049 - val_mse: 0.0049
Epoch 147/200
5/5 [==============================] - 22s 4s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0046 - val_mse: 0.0046
Epoch 148/200
5/5 [==============================] - 25s 5s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0047 - val_mse: 0.0047
Epoch 149/200
5/5 [==============================] - 25s 5s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 150/200
5/5 [==============================] - 22s 4s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0046 - val_mse: 0.0046
Epoch 151/200
5/5 [==============================] - 26s 5s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 152/200
5/5 [==============================] - 30s 6s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 153/200
5/5 [==============================] - 31s 6s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 154/200
5/5 [==============================] - 31s 6s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 155/200
5/5 [==============================] - 32s 6s/step - loss: 0.0049 - mse: 0.0049 - val_loss: 0.0047 - val_mse: 0.0047
Epoch 156/200
5/5 [==============================] - 28s 6s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 157/200
5/5 [==============================] - 22s 4s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 158/200
5/5 [==============================] - 25s 5s/step - loss: 0.0047 - mse: 0.0047 - val_loss: 0.0043 - val_mse: 0.0043
Epoch 159/200
5/5 [==============================] - 22s 5s/step - loss: 0.0047 - mse: 0.0047 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 160/200
5/5 [==============================] - 25s 5s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 161/200
5/5 [==============================] - 25s 5s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0049 - val_mse: 0.0049
Epoch 162/200
5/5 [==============================] - 25s 5s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 163/200
5/5 [==============================] - 22s 4s/step - loss: 0.0046 - mse: 0.0046 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 164/200
5/5 [==============================] - 25s 5s/step - loss: 0.0047 - mse: 0.0047 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 165/200
5/5 [==============================] - 25s 5s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0046 - val_mse: 0.0046
Epoch 166/200
5/5 [==============================] - 26s 5s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 167/200
5/5 [==============================] - 25s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0043 - val_mse: 0.0043
Epoch 168/200
5/5 [==============================] - 25s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0048 - val_mse: 0.0048
Epoch 169/200
5/5 [==============================] - 26s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 170/200
5/5 [==============================] - 25s 6s/step - loss: 0.0043 - mse: 0.0043 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 171/200
5/5 [==============================] - 31s 6s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 172/200
5/5 [==============================] - 28s 7s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0047 - val_mse: 0.0047
Epoch 173/200
5/5 [==============================] - 32s 7s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 174/200
5/5 [==============================] - 28s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 175/200
5/5 [==============================] - 27s 5s/step - loss: 0.0043 - mse: 0.0043 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 176/200
5/5 [==============================] - 25s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0043 - val_mse: 0.0043
Epoch 177/200
5/5 [==============================] - 26s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0043 - val_mse: 0.0043
Epoch 178/200
5/5 [==============================] - 25s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 179/200
5/5 [==============================] - 25s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 180/200
5/5 [==============================] - 25s 5s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 181/200
5/5 [==============================] - 26s 5s/step - loss: 0.0043 - mse: 0.0043 - val_loss: 0.0042 - val_mse: 0.0042
Epoch 182/200
5/5 [==============================] - 26s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0043 - val_mse: 0.0043
Epoch 183/200
5/5 [==============================] - 25s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 184/200
5/5 [==============================] - 25s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 185/200
5/5 [==============================] - 24s 5s/step - loss: 0.0043 - mse: 0.0043 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 186/200
5/5 [==============================] - 24s 5s/step - loss: 0.0043 - mse: 0.0043 - val_loss: 0.0043 - val_mse: 0.0043
Epoch 187/200
5/5 [==============================] - 25s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0046 - val_mse: 0.0046
Epoch 188/200
5/5 [==============================] - 27s 5s/step - loss: 0.0043 - mse: 0.0043 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 189/200
5/5 [==============================] - 28s 6s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0043 - val_mse: 0.0043
Epoch 190/200
5/5 [==============================] - 30s 6s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0043 - val_mse: 0.0043
Epoch 191/200
5/5 [==============================] - 31s 6s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0043 - val_mse: 0.0043
Epoch 192/200
5/5 [==============================] - 26s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0043 - val_mse: 0.0043
Epoch 193/200
5/5 [==============================] - 27s 5s/step - loss: 0.0043 - mse: 0.0043 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 194/200
5/5 [==============================] - 25s 5s/step - loss: 0.0043 - mse: 0.0043 - val_loss: 0.0043 - val_mse: 0.0043
Epoch 195/200
5/5 [==============================] - 25s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 196/200
5/5 [==============================] - 25s 5s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0051 - val_mse: 0.0051
Epoch 197/200
5/5 [==============================] - 25s 5s/step - loss: 0.0047 - mse: 0.0047 - val_loss: 0.0045 - val_mse: 0.0045
Epoch 198/200
5/5 [==============================] - 22s 4s/step - loss: 0.0045 - mse: 0.0045 - val_loss: 0.0043 - val_mse: 0.0043
Epoch 199/200
5/5 [==============================] - 21s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0044 - val_mse: 0.0044
Epoch 200/200
5/5 [==============================] - 26s 5s/step - loss: 0.0044 - mse: 0.0044 - val_loss: 0.0045 - val_mse: 0.0045
%% Cell type:code id: tags:
``` python
# Save the trained model to disk.
# NOTE(review): the .h5 (HDF5) format is legacy in current Keras (see
# the UserWarning in the cell output); the native `.keras` format is
# recommended, but the filename is kept unchanged because the load cell
# below reads this exact path.
model.save('trained_modell.h5')
```
%% Output
c:\Users\pgran\OneDrive - Hochschule Hannover\Semester 10\Einarbeitung\detecting_anomalies\.venv\lib\site-packages\keras\src\engine\training.py:3000: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.
saving_api.save_model(
%% Cell type:code id: tags:
``` python
# Reload the trained weights into the model defined above (allows
# re-running the notebook from here without retraining).
model.load_weights("trained_modell.h5")
print("Loaded model from disk")
```
%% Output
Loaded model from disk
%% Cell type:code id: tags:
``` python
# Get all batches generated by the datagen and pick a batch for prediction.
# Just to test the model.
data_batch = []  # Capture generated training batches as numpy arrays.
img_num = 0
# batch_index points at the next batch the generator will yield, so this
# collects batches up to the generator's current position.
while img_num <= train_generator.batch_index:
    data = next(train_generator)  # next(gen) instead of deprecated gen.next()
    data_batch.append(data[0])
    img_num += 1

predicted = model.predict(data_batch[0])  # Predict on the first batch of images.

# Sanity check: view a random image next to its reconstruction.
# random.randint has an INCLUSIVE upper bound, so it must be
# shape[0] - 1 — the original `randint(0, predicted.shape[0])` could
# raise an occasional IndexError (off-by-one).
image_number = random.randint(0, predicted.shape[0] - 1)
plt.figure(figsize=(12, 6))
plt.subplot(121)
plt.imshow(data_batch[0][image_number])
plt.subplot(122)
plt.imshow(predicted[image_number])
plt.show()
```
%% Output
2/2 [==============================] - 2s 572ms/step
%% Cell type:code id: tags:
``` python
# Compare the mean reconstruction error on normal (validation) images vs.
# anomaly (parasitized) images; anomalies should reconstruct worse.
# Model.evaluate accepts generators directly; evaluate_generator is
# deprecated (the original cell's own output warned about this).
validation_error = model.evaluate(validation_generator)
anomaly_error = model.evaluate(anomaly_generator)
print("Recon. error for the validation (normal) data is: ", validation_error)
print("Recon. error for the anomaly data is: ", anomaly_error)
```
%% Output
C:\Users\pgran\AppData\Local\Temp\ipykernel_8048\2468543786.py:3: UserWarning: `Model.evaluate_generator` is deprecated and will be removed in a future version. Please use `Model.evaluate`, which supports generators.
validation_error = model.evaluate_generator(validation_generator)
C:\Users\pgran\AppData\Local\Temp\ipykernel_8048\2468543786.py:4: UserWarning: `Model.evaluate_generator` is deprecated and will be removed in a future version. Please use `Model.evaluate`, which supports generators.
anomaly_error = model.evaluate_generator(anomaly_generator)
Recon. error for the validation (normal) data is: [0.004411873407661915, 0.004411873407661915]
Recon. error for the anomaly data is: [0.004912924952805042, 0.004912924952805042]
%% Cell type:code id: tags:
``` python
# Rebuild the encoder half of the autoencoder, reusing the trained weights.
# Its output (the bottleneck / latent space) feeds a kernel density
# estimate, which complements the reconstruction-error signal.
encoder_model = Sequential()
encoder_model.add(Conv2D(64, (3, 3), activation='relu', padding='same',
                         input_shape=(SIZE, SIZE, 3),
                         weights=model.layers[0].get_weights()))
encoder_model.add(MaxPooling2D((2, 2), padding='same'))
encoder_model.add(Conv2D(32, (3, 3), activation='relu', padding='same',
                         weights=model.layers[2].get_weights()))
encoder_model.add(MaxPooling2D((2, 2), padding='same'))
encoder_model.add(Conv2D(16, (3, 3), activation='relu', padding='same',
                         weights=model.layers[4].get_weights()))
encoder_model.add(MaxPooling2D((2, 2), padding='same'))
encoder_model.summary()

########################################################
# Fit a kernel density estimate (KDE) on the latent vectors of the
# normal training images.
from sklearn.neighbors import KernelDensity

# Get the latent-space encoding of the training images.
# Model.predict accepts generators; predict_generator is deprecated.
encoded_images = encoder_model.predict(train_generator)

# sklearn's KDE takes 1D vectors, so flatten each latent feature map
# (16x16x16 per the encoder summary) into a single vector.
encoder_output_shape = encoder_model.output_shape
out_vector_shape = encoder_output_shape[1] * encoder_output_shape[2] * encoder_output_shape[3]
encoded_images_vector = [np.reshape(img, (out_vector_shape)) for img in encoded_images]

# Fit the KDE to the latent representation of the normal images.
kde = KernelDensity(kernel='gaussian', bandwidth=0.2).fit(encoded_images_vector)
# Calculate KDE density and reconstruction error per image to find their
# mean and standard deviation for good and anomaly images.
# These statistics are used to set the anomaly-detection thresholds.
def calc_density_and_recon_error(batch_images):
    """Return (mean density, std density, mean recon error, std recon error)
    computed over every image in batch_images.

    batch_images: numpy array of images, shape (N, SIZE, SIZE, 3),
    already rescaled to [0, 1] by the generator.
    """
    density_list = []
    recon_error_list = []
    # Upper bound is shape[0], NOT shape[0]-1: the original off-by-one
    # silently skipped the last image of every batch.
    for im in range(0, batch_images.shape[0]):
        img = batch_images[im]
        img = img[np.newaxis, :, :, :]  # add a batch axis
        # Compress the image to its latent representation via the encoder.
        encoded_img = encoder_model.predict([[img]])
        # Flatten for sklearn's KDE, which expects 1D feature vectors.
        encoded_img = [np.reshape(img, (out_vector_shape)) for img in encoded_img]
        # score_samples returns the log-density of the sample under the KDE.
        density = kde.score_samples(encoded_img)[0]
        reconstruction = model.predict([[img]])
        reconstruction_error = model.evaluate([reconstruction], [[img]], batch_size=1)[0]
        density_list.append(density)
        recon_error_list.append(reconstruction_error)

    average_density = np.mean(np.array(density_list))
    stdev_density = np.std(np.array(density_list))
    average_recon_error = np.mean(np.array(recon_error_list))
    stdev_recon_error = np.std(np.array(recon_error_list))
    return average_density, stdev_density, average_recon_error, stdev_recon_error
# Get average and std dev. of density and reconstruction error for one
# batch of uninfected (normal) and one batch of anomaly (parasitized)
# images. The resulting statistics guide the detection thresholds.
# Use the next() builtin; the generator's .next() method is deprecated.
train_batch = next(train_generator)[0]
anomaly_batch = next(anomaly_generator)[0]

uninfected_values = calc_density_and_recon_error(train_batch)
anomaly_values = calc_density_and_recon_error(anomaly_batch)
```
%% Output
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_10 (Conv2D) (None, 128, 128, 64) 1792
max_pooling2d_6 (MaxPoolin (None, 64, 64, 64) 0
g2D)
conv2d_11 (Conv2D) (None, 64, 64, 32) 18464
max_pooling2d_7 (MaxPoolin (None, 32, 32, 32) 0
g2D)
conv2d_12 (Conv2D) (None, 32, 32, 16) 4624
max_pooling2d_8 (MaxPoolin (None, 16, 16, 16) 0
g2D)
=================================================================
Total params: 24880 (97.19 KB)
Trainable params: 24880 (97.19 KB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
Cell In[10], line 16
12 encoder_model.summary()
14 ########################################################
15 # Calculate KDE using sklearn
---> 16 from sklearn.neighbors import KernelDensity
18 #Get encoded output of input images = Latent space
19 encoded_images = encoder_model.predict_generator(train_generator)
ModuleNotFoundError: No module named 'sklearn'
%% Cell type:code id: tags:
``` python
# Now, input unknown images and sort each one as Good or Anomaly.
def check_anomaly(img_path):
    """Print whether the image at img_path is an anomaly.

    An image is flagged as an anomaly when its latent-space KDE density is
    below density_threshold OR its reconstruction error is above
    reconstruction_error_threshold.
    """
    density_threshold = 2500  # set from the statistics computed above
    reconstruction_error_threshold = 0.004  # set from the statistics above
    img = Image.open(img_path)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    img = np.array(img.resize((128, 128), Image.LANCZOS))
    plt.imshow(img)
    img = img / 255.  # match the generator's rescale=1./255
    img = img[np.newaxis, :, :, :]  # add a batch axis
    encoded_img = encoder_model.predict([[img]])
    encoded_img = [np.reshape(img, (out_vector_shape)) for img in encoded_img]
    density = kde.score_samples(encoded_img)[0]  # log-density under the KDE
    reconstruction = model.predict([[img]])
    reconstruction_error = model.evaluate([reconstruction], [[img]], batch_size=1)[0]
    if density < density_threshold or reconstruction_error > reconstruction_error_threshold:
        print("The image is an anomaly")
    else:
        print("The image is NOT an anomaly")
# Load a couple of test images and verify whether they are reported as anomalies.
import glob

para_file_paths = glob.glob('cell_images2/parasitized/images/*')
uninfected_file_paths = glob.glob('cell_images2/uninfected_train/images/*')

# Anomaly image verification
num = random.randint(0, len(para_file_paths) - 1)
check_anomaly(para_file_paths[num])

# Good/normal image verification.
# BUG FIX: index with the *uninfected* list's length — the original used
# len(para_file_paths) here, which can raise IndexError when the two
# directories hold different numbers of files.
num = random.randint(0, len(uninfected_file_paths) - 1)
check_anomaly(uninfected_file_paths[num])
```
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment