Tips for using TensorBoard with Keras. ref: https://qiita.com/GushiSnow/items/6808121ba54fb2e53497
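The snippets below assume a Keras 2.x / TensorFlow 1.x environment; the following import sketch shows what they rely on (log_dir and the uppercase constants such as N_HIDDEN, RESHAPED, DROPOUT, NB_CLASSES, BATCH_SIZE, NB_EPOCH, VERBOSE and VALIDATION_SPLIT are assumed to be defined elsewhere in the script, as are the MNIST arrays X_train/Y_train/X_test/Y_test):

import os

import numpy as np
import tensorflow as tf
from PIL import Image

from keras import backend as K
from keras.callbacks import TensorBoard
from keras.layers import Activation, Dense, Dropout
from keras.models import Model, Sequential
from tensorflow.contrib.tensorboard.plugins import projector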
# Built-in TensorBoard callback: histogram, gradient and weight-image summaries
# plus embedding visualization are all enabled here.
TensorBoard(log_dir=log_dir,
            histogram_freq=1,
            write_grads=True,
            write_images=True,
            embeddings_freq=1,
            embeddings_layer_names=layer_name,
            embeddings_metadata=metadata_file)
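layer_name and metadata_file are not defined in the snippet above; a hedged example of values consistent with the MNIST setup further down (the actual layer names depend on your model, so check model.summary() first):

# Hypothetical values matching the MNIST example below.
layer_name = ['dense_1']        # layer(s) whose embeddings should be visualized
metadata_file = 'metadata.tsv'  # label file, typically resolved relative to log_dir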
with tf.name_scope('Model') as scope:
    model = Sequential()
    with tf.name_scope('Dense') as scope:
        model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))
    with tf.name_scope('Relu') as scope:
        model.add(Activation('relu'))
    with tf.name_scope('Dropout') as scope:
        model.add(Dropout(DROPOUT))
    with tf.name_scope('Dense2') as scope:
        model.add(Dense(N_HIDDEN))
    with tf.name_scope('Relu2') as scope:
        model.add(Activation('relu'))
    with tf.name_scope('Dropout2') as scope:
        model.add(Dropout(DROPOUT))
    with tf.name_scope('Dense3') as scope:
        model.add(Dense(NB_CLASSES, activation='softmax'))
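The model still needs to be compiled before model.fit() is called; a minimal sketch (the loss matches the softmax / one-hot-label setup, while the choice of RMSprop is an assumption, not taken from the original article):

# Assumed compile step; swap in whatever optimizer/metrics you actually use.
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])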
class TensorResponseBoard(TensorBoard):
    def __init__(self, val_size, img_path, img_size, **kwargs):
        super(TensorResponseBoard, self).__init__(**kwargs)
        self.val_size = val_size
        self.img_path = img_path
        self.img_size = img_size

    def set_model(self, model):
        super(TensorResponseBoard, self).set_model(model)

        if self.embeddings_freq and self.embeddings_layer_names:
            embeddings = {}
            for layer_name in self.embeddings_layer_names:
                # initialize tensors which will later be used in `on_epoch_end()` to
                # store the response values by feeding the val data through the model
                layer = self.model.get_layer(layer_name)
                output_dim = layer.output.shape[-1]
                response_tensor = tf.Variable(tf.zeros([self.val_size, output_dim]),
                                              name=layer_name + '_response')
                embeddings[layer_name] = response_tensor

            self.embeddings = embeddings
            self.saver = tf.train.Saver(list(self.embeddings.values()))

            response_outputs = [self.model.get_layer(layer_name).output
                                for layer_name in self.embeddings_layer_names]
            self.response_model = Model(self.model.inputs, response_outputs)

            config = projector.ProjectorConfig()
            embeddings_metadata = {layer_name: self.embeddings_metadata
                                   for layer_name in embeddings.keys()}

            for layer_name, response_tensor in self.embeddings.items():
                embedding = config.embeddings.add()
                embedding.tensor_name = response_tensor.name

                # for coloring points by labels
                embedding.metadata_path = embeddings_metadata[layer_name]

                # for attaching images to the points
                embedding.sprite.image_path = self.img_path
                embedding.sprite.single_image_dim.extend(self.img_size)

            projector.visualize_embeddings(self.writer, config)

    def on_epoch_end(self, epoch, logs=None):
        super(TensorResponseBoard, self).on_epoch_end(epoch, logs)

        if self.embeddings_freq and self.embeddings_ckpt_path:
            if epoch % self.embeddings_freq == 0:
                # feed the validation data through the model
                val_data = self.validation_data[0]
                response_values = self.response_model.predict(val_data)
                if len(self.embeddings_layer_names) == 1:
                    response_values = [response_values]

                # record the response at each layer we're monitoring
                response_tensors = []
                for layer_name in self.embeddings_layer_names:
                    response_tensors.append(self.embeddings[layer_name])
                K.batch_set_value(list(zip(response_tensors, response_values)))

                # finally, save all tensors holding the layer responses
                self.saver.save(self.sess, self.embeddings_ckpt_path, epoch)
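The callback instantiated below assumes the first Dense layer is auto-named 'dense_1'; a quick way to confirm the generated layer names on your own model (an inspection helper, not part of the original gist):

print([layer.name for layer in model.layers])  # e.g. ['dense_1', 'activation_1', 'dropout_1', ...]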
# Build a 100x100 sprite sheet out of the 10,000 MNIST test images (28x28 each)
# and save it next to the logs, together with one class label per line.
img_array = X_test.reshape(100, 100, 28, 28)
img_array_flat = np.concatenate([np.concatenate([x for x in row], axis=1) for row in img_array])
img = Image.fromarray(np.uint8(255 * (1. - img_array_flat)))
img.save(os.path.join(log_dir, 'images.jpg'))
np.savetxt(os.path.join(log_dir, 'metadata.tsv'), np.where(Y_test)[1], fmt='%d')
# MNIST: instantiate the custom callback with the usual TensorBoard options
# plus the extra sprite/metadata arguments.
tb = TensorResponseBoard(log_dir=log_dir, histogram_freq=1, batch_size=10,
                         write_graph=True, write_grads=True, write_images=True,
                         embeddings_freq=1,
                         embeddings_layer_names=['dense_1'],
                         embeddings_metadata='metadata.tsv',
                         val_size=len(X_test), img_path='images.jpg', img_size=[28, 28])
with tf.name_scope('TrainingModel') as scope:
    model.fit(X_train, Y_train,
              batch_size=BATCH_SIZE, epochs=NB_EPOCH,
              callbacks=[tb],
              verbose=VERBOSE, validation_split=VALIDATION_SPLIT)
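After training, the logs can be inspected with the TensorBoard CLI, e.g. tensorboard --logdir=<log_dir>; if the sprite image and metadata.tsv were written as above, the Projector tab should show the 'dense_1' responses colored by label and decorated with the digit images.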