Implementation
Here is one way to fine-tune VGG16 while adding batch normalization layers using Keras.
1. Imports
from keras.applications.vgg16 import VGG16
from keras.optimizers import SGD
from keras.layers import Input, Dense, Flatten, BatchNormalization, Activation
from keras.models import Sequential, Model
2. Load Model
# Specify the input size
img_height = 512
img_width = 512
input_tensor = Input(shape=(img_height, img_width, 3))
# Load the ImageNet weights
# Drop the fully connected classifier layers by setting include_top=False
vgg16 = VGG16(weights='imagenet', include_top=False, input_tensor=input_tensor)
vgg16.summary()
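Note that the ImageNet weights expect inputs preprocessed the same way the original VGG16 was trained. Keras provides a helper for this; a minimal sketch, assuming x_batch is a placeholder NumPy array of raw RGB images with shape (batch, 512, 512, 3):

from keras.applications.vgg16 import preprocess_input
# Converts RGB to BGR and subtracts the ImageNet channel means
x_batch = preprocess_input(x_batch)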
3. Create Head Model Using Sequential
number_of_classes = 3
head = Sequential()
# Flatten the convolutional feature maps from the VGG16 base
head.add(Flatten(input_shape=vgg16.output_shape[1:]))
# The number of units in this Dense layer is a free choice; 128 is just an example.
head.add(Dense(128))
#Apply Batch Norm before activation
head.add(BatchNormalization())
head.add(Activation('relu'))
# Repeat the Dense -> BatchNorm -> ReLU pattern
head.add(Dense(64))
head.add(BatchNormalization())
head.add(Activation('relu'))
# Final classification layer with softmax over the classes
head.add(Dense(number_of_classes, activation='softmax'))
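As an aside, the two repeated Dense / BatchNormalization / Activation blocks above could equally be written with a small helper; placing BatchNormalization between the Dense layer and its activation normalizes the pre-activation values, which is the arrangement from the original batch normalization paper. This is only a sketch, and add_dense_bn_block is a hypothetical name, not a Keras function:

def add_dense_bn_block(model, units):
    # Dense layer, then batch normalization, then a ReLU activation
    model.add(Dense(units))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

# Equivalent to the two blocks added manually above
# add_dense_bn_block(head, 128)
# add_dense_bn_block(head, 64)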
4. Combine the Models
combined_model = Model(vgg16.input, head(vgg16.output))
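A quick summary of the combined model helps confirm the structure; the Sequential head appears as a single layer stacked on top of the 19 layers of the VGG16 base (the input layer plus 13 convolution and 5 pooling layers), which is where the index used in the next step comes from.

combined_model.summary()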
5. Freeze Layers
# Freeze all layers of the VGG16 convolutional base (the first 19 layers)
for layer in combined_model.layers[:19]:
    layer.trainable = False
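If you would rather not count layer indices, an equivalent sketch is to freeze the base through the vgg16 object itself, since the combined model shares those same layer instances:

# Freeze every layer of the VGG16 base directly
for layer in vgg16.layers:
    layer.trainable = False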
6. Compile Your Model
# Define the loss function, optimizer, and metrics
combined_model.compile(loss='categorical_crossentropy',
                       optimizer=SGD(lr=1e-4, momentum=0.9),
                       metrics=['accuracy'])
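With the model compiled, training works like any other Keras model. A minimal sketch, assuming x_train and y_train are placeholder NumPy arrays holding 512x512 RGB images and one-hot labels with number_of_classes columns:

# Train the new head (the frozen VGG16 base is not updated)
combined_model.fit(x_train, y_train,
                   batch_size=16,
                   epochs=10,
                   validation_split=0.2)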