from tensorflow.keras import layers, models
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

# Base directory for saving checkpoints; not defined in the original gist,
# so set it to whatever fits your environment.
project_path = "."

def create_model(image_shape=(256, 4096, 1), print_summary=False):
    # initialize a sequential model
    model = models.Sequential()

    # conv layer: 16 filters, 7x7 kernel
    model.add(layers.Conv2D(16, (7, 7), input_shape=image_shape))
    # batch normalization -> stabilize training, slight regularization
    model.add(layers.BatchNormalization())
    # activation layer
    model.add(layers.Activation('relu'))
    # max pooling -> reduce feature-map size
    model.add(layers.MaxPooling2D((2, 2)))
    # dropout layer -> avoid overfitting
    model.add(layers.Dropout(0.25))

    # conv layer: 32 filters, 5x5 kernel
    model.add(layers.Conv2D(32, (5, 5), padding="same"))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Dropout(0.25))

    # conv layer: 64 filters, 3x3 kernel
    model.add(layers.Conv2D(64, (3, 3), padding="same"))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Dropout(0.25))

    # conv layer: 64 filters, 3x3 kernel
    model.add(layers.Conv2D(64, (3, 3), padding="same"))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Dropout(0.25))

    # flatten layer -> from feature maps to a vector
    model.add(layers.Flatten())

    # fully connected layer with 64 nodes
    model.add(layers.Dense(64))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.Dropout(0.25))

    # fully connected layer with 64 nodes
    model.add(layers.Dense(64))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.Dropout(0.25))

    # output layer -> single sigmoid unit for binary classification
    model.add(layers.Dense(1, activation='sigmoid'))

    # compile with SGD and binary cross-entropy
    model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])

    # show the CNN model detail
    if print_summary:
        model.summary()
    return model

def train_model(model, xtrain, ytrain, xval, yval, n_epoch, batch_size):
    # train the CNN model; batch size limits memory usage
    # early stopping halts training when validation accuracy stops improving
    earlystopping = EarlyStopping(monitor='val_accuracy', patience=2)
    # checkpoint the best model so far (by validation accuracy) after each epoch
    filepath = project_path + "/model/weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint, earlystopping]
    history = model.fit(xtrain, ytrain, epochs=n_epoch, batch_size=batch_size,
                        validation_data=(xval, yval), callbacks=callbacks_list)
    return history

create_model(image_shape=(256, 4096, 1), print_summary=True)
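
# Usage sketch (not part of the original gist): a minimal example of how
# create_model and train_model might be wired together. The synthetic random
# arrays, the smaller (64, 64, 1) input shape, and the epoch/batch values are
# illustrative assumptions; real data of shape (256, 4096, 1) would be loaded
# from disk instead. The .hdf5 checkpoint format assumes a TF 2.x-era Keras.
if __name__ == "__main__":
    import os
    import numpy as np

    # ModelCheckpoint needs the target directory to exist before saving
    os.makedirs(project_path + "/model", exist_ok=True)

    # tiny synthetic dataset: 20 training / 4 validation samples, binary labels
    xtrain = np.random.rand(20, 64, 64, 1).astype("float32")
    ytrain = np.random.randint(0, 2, size=(20,))
    xval = np.random.rand(4, 64, 64, 1).astype("float32")
    yval = np.random.randint(0, 2, size=(4,))

    demo_model = create_model(image_shape=(64, 64, 1))
    history = train_model(demo_model, xtrain, ytrain, xval, yval,
                          n_epoch=2, batch_size=4)
    print("validation accuracy per epoch:", history.history["val_accuracy"])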