Forked from Kjeanclaude/VIDEO-QA with Tensorflow 1.2RC0.ipynb
Created June 20, 2017 06:13
EXAMPLE OF VIDEO-QA IMPLEMENTATION WITH TENSORFLOW 1.2RC0
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "EFFECTIVE TENSORFLOW FOR NON-EXPERTS https://www.youtube.com/watch?v=5DknTFbcGVM"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
"source": [ | |
"**You could download the latest version of Tensorflow, all branches here :**\n", | |
"\n", | |
"==> **https://www.tensorflow.org/versions/**\n", | |
"\n", | |
"==> ** https://pypi.python.org/pypi/tensorflow#downloads** (Installation : ***pip install package_name***)" | |
] | |
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
"### EXAMPLE OF VIDEO-QA IMPLEMENTATION WITH TENSORFLOW 1.2RC0\n", | |
"from __future__ import print_function\n", | |
"import matplotlib.pyplot as plt\n", | |
"import matplotlib.gridspec as gridspec\n", | |
"import numpy as np\n", | |
"import tensorflow as tf\n", | |
"print(\"tf.__version__ : \", tf.__version__)\n", | |
"\n", | |
"from tensorflow import keras\n", | |
"from tensorflow.keras import layers\n", | |
"from tensorflow.keras.applications import InceptionV3\n", | |
"\n", | |
"### Turning frames into a vector, with pre-trained representations\n", | |
"video = keras.Input(shape=(None, 150, 150, 3), name='video')\n", | |
"cnn = InceptionV3(weights='imagenet',\n", | |
" include_top=False,\n", | |
" pooling='avg')\n", | |
"cnn.trainable = False\n", | |
"frame_features = layers.TimeDistributed(cnn)(video)\n", | |
"video_vector = layers.LSTM(256)(frame_features)\n", | |
"\n", | |
"### Turning a sequence of words into a vector\n", | |
"question = keras.Input(shape=(None, ), dtype='int32', name='question')\n", | |
"embedded_words = layers.Embedding(input_voc_size, 256)(question)\n", | |
"question_vector = layers.LSTM(128)(embedded_words)\n", | |
"\n", | |
"### Predicting an answer word\n", | |
"x = layers.concatenate([video_vector, question_vector])\n", | |
"x = layers.dense(128, activation=tf.nn.relu)(x)\n", | |
"predictions = layers.dense(output_voc_size, name='predictions')(x)\n", | |
"\n", | |
"### Setting up the training configuration\n", | |
"model = keras.models.Model([video, question], predictions)\n", | |
"model.compile(optimizer=tf.AdamOptimizer(),\n", | |
" loss=tf.softmax_crossentropy_with_logits)\n", | |
"\n", | |
"\n", | |
"### Leveraging Experiment for distributed training\n", | |
"def experiment_fn(config, params):\n", | |
" model = ...\n", | |
" estimator = model.get_estimator(config=...)\n", | |
" return Experiment(estimator,\n", | |
" train_input_fn=pandas_input_fn(...),\n", | |
" eval_input_fn=pandas_input_fn(...))\n", | |
"\n", | |
"\n", | |
"if __name__ == '__main__':\n", | |
" tf.contrib.train.run_experiment(experiment_fn)" | |
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
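
For reference, here is a minimal end-to-end sketch of the same model exercised on random dummy data. It assumes a current tf.keras (TensorFlow 2.x) installation rather than the 1.2RC0 API shown in the notebook, and all sizes and variable names (num_samples, num_frames, seq_len, the vocabulary sizes) are illustrative assumptions, not part of the original gist. Because the final Dense layer applies a softmax, categorical crossentropy stands in for the talk's softmax_crossentropy_with_logits loss.

# Dummy-data smoke test for the video-QA model defined in the notebook above.
# Assumes TensorFlow 2.x / tf.keras; all sizes below are illustrative.
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.applications import InceptionV3

input_voc_size = 10000    # question vocabulary (assumed)
output_voc_size = 1000    # answer vocabulary (assumed)

# Model definition, mirroring the notebook cell
video = keras.Input(shape=(None, 150, 150, 3), name='video')
cnn = InceptionV3(weights='imagenet', include_top=False, pooling='avg')
cnn.trainable = False
frame_features = layers.TimeDistributed(cnn)(video)
video_vector = layers.LSTM(256)(frame_features)

question = keras.Input(shape=(None,), dtype='int32', name='question')
embedded_words = layers.Embedding(input_voc_size, 256)(question)
question_vector = layers.LSTM(128)(embedded_words)

x = layers.concatenate([video_vector, question_vector])
x = layers.Dense(128, activation='relu')(x)
predictions = layers.Dense(output_voc_size, activation='softmax', name='predictions')(x)

model = keras.Model([video, question], predictions)
model.compile(optimizer='adam', loss='categorical_crossentropy')

# Random stand-in data: 4 clips of 8 frames, questions of 10 tokens, one-hot answers
num_samples, num_frames, seq_len = 4, 8, 10
video_data = np.random.rand(num_samples, num_frames, 150, 150, 3).astype('float32')
question_data = np.random.randint(0, input_voc_size, size=(num_samples, seq_len))
answer_data = keras.utils.to_categorical(
    np.random.randint(0, output_voc_size, size=(num_samples,)),
    num_classes=output_voc_size)

# One training pass over the dummy batch to confirm the graph wires up
model.fit([video_data, question_data], answer_data, batch_size=2, epochs=1)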