VGG16_Keras_Transferlearning
from keras import applications
from keras import optimizers
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense
from keras import backend as k
from pprint import pprint as pp
img_width, img_height = 300, 300
train_data_dir = "/train"
validation_data_dir = "/validation"
nb_train_samples = 200
nb_validation_samples = 40
batch_size = 20
epochs = 50

model = applications.VGG16(weights="imagenet", include_top=False, input_shape=(img_width, img_height, 3))
# Freeze all layers of the pre-trained VGG16 base so only the new top layers are trained
for layer in model.layers:
    layer.trainable = False
# Adding custom layers on top of the VGG16 base
x = model.output
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(1024, activation="relu")(x)
predictions = Dense(100, activation="softmax")(x)

# creating the final model
model_final = Model(inputs=model.input, outputs=predictions)
# compile the model
model_final.compile(loss="categorical_crossentropy",
                    optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
                    metrics=["accuracy"])
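The gist stops after compiling the model and never uses train_data_dir, validation_data_dir, or the sample counts defined above. A minimal training sketch is below, assuming each data directory contains one subfolder per class; the ImageDataGenerator, flow_from_directory, and fit_generator calls are standard Keras 2.x API, but the augmentation settings and data layout are assumptions, not part of the original gist.

# Sketch of a possible training step (not in the original gist): stream images from
# the class-per-subfolder directories defined above and train the new top layers.
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True)
validation_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode="categorical")

validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode="categorical")

model_final.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)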