#!/usr/bin/env python
# coding: utf-8
# # Using VGG16 for our Face Classifier
#
# Freeze all layers of the convolutional base; we'll only train the new FC head
from keras.applications import VGG16
# VGG16 was designed to work on 224 x 224 pixel input images sizes
img_rows, img_cols = 224, 224
# Re-loads the VGG16 model without the top or FC layers
model = VGG16(weights='imagenet',
              include_top=False,
              input_shape=(img_rows, img_cols, 3))
# Here we freeze every layer of the base
# Layers are set to trainable = True by default
for layer in model.layers:
    layer.trainable = False
# Let's print our layers
for (i, layer) in enumerate(model.layers):
    print(str(i) + " " + layer.__class__.__name__, layer.trainable)
# ### Let's make a function that returns our FC Head
def add_layer(bottom_model, num_classes):
    """Creates the top (head) of the model that will be
    placed on top of the bottom layers."""
    top_model = bottom_model.output
    top_model = GlobalAveragePooling2D()(top_model)
    top_model = Dense(1024, activation='relu')(top_model)
    top_model = Dense(512, activation='relu')(top_model)
    top_model = Dense(num_classes, activation='softmax')(top_model)
    return top_model
# ### Let's add our FC Head back onto VGG16
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
# Set our class number to 3 (Young, Middle, Old)
num_classes = 3
FC_Head = add_layer(model, num_classes)
modelnew = Model(inputs = model.input, outputs = FC_Head)
print(modelnew.summary())
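# Optional sanity check (a minimal sketch, not in the original script):
# confirm that the frozen VGG16 base contributes no trainable weights and
# that only the new FC head will be updated during training.
from keras import backend as K
trainable_params = sum(K.count_params(w) for w in modelnew.trainable_weights)
frozen_params = sum(K.count_params(w) for w in modelnew.non_trainable_weights)
print("Trainable params:", trainable_params)
print("Frozen params:", frozen_params)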
# ### Loading our Face Datasets
from keras.preprocessing.image import ImageDataGenerator
train_data_dir = '/root/datasets/train/'
validation_data_dir = '/root/datasets/validation/'
# Let's use some data augmentation
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=45,
    width_shift_range=0.3,
    height_shift_range=0.3,
    horizontal_flip=True,
    fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1./255)
# Set our batch size (16-32 is typical on most mid-tier systems)
batch_size = 32
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode='categorical')
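# Optional sanity check (illustrative): flow_from_directory assigns class
# indices alphabetically by folder name, so it's worth confirming which index
# maps to Young, Middle, and Old before interpreting predictions.
print(train_generator.class_indices)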
# ### Training our Model
# - Note we're using checkpointing and early stopping
from keras.optimizers import RMSprop
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("/root/face_vgg16.h5",
                             monitor="val_loss",
                             mode="min",
                             save_best_only=True,
                             verbose=1)
earlystop = EarlyStopping(monitor='val_loss',
                          min_delta=0,
                          patience=3,
                          verbose=1,
                          restore_best_weights=True)
# We put our callbacks into a callback list
callbacks = [earlystop, checkpoint]
# We use a small learning rate
modelnew.compile(loss='categorical_crossentropy',
                 optimizer=RMSprop(lr=0.001),
                 metrics=['accuracy'])
# Enter the number of training and validation samples here
nb_train_samples = 102
nb_validation_samples = 28
# We train for only 2 epochs
epochs = 2
# Note: the generators above yield batches of 32; this smaller value is
# only used for the step-count arithmetic below
batch_size = 16
history = modelnew.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    callbacks=callbacks,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
modelnew.save("/root/face_vgg16.h5")
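# A minimal inference sketch (illustrative; the test image path below is a
# hypothetical example, and the class labels are recovered from the training
# generator's alphabetical folder-to-index mapping):
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import load_img, img_to_array

classifier = load_model("/root/face_vgg16.h5")
img = load_img("/root/datasets/test/sample.jpg", target_size=(img_rows, img_cols))
x = img_to_array(img) / 255.0   # same rescaling as the training generator
x = np.expand_dims(x, axis=0)   # the model expects a batch dimension
probs = classifier.predict(x)[0]
labels = {v: k for k, v in train_generator.class_indices.items()}
print("Predicted class:", labels[int(np.argmax(probs))])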
# ### Re-training the Model
# - Triggering the next job if the accuracy falls below 92%
final_accuracy = history.history["val_accuracy"][-1]
print(final_accuracy)
import os

if final_accuracy < 0.92:
    os.system("curl --user 'admin:admin' http://192.168.43.133:8080/view/Mlops-project-1/job/Retraining_the_Model/build?token=retrain_model")
else:
    print("Your new accuracy =", final_accuracy)