@senvey · Created September 11, 2018
TensorFlow
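Two TensorFlow 1.x MNIST classifiers trained with the same recipe (Adam optimizer, exponentially decaying learning rate, mini-batches of 100): first a three-layer convolutional network with a fully connected readout, then a five-layer fully connected network.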
#!/usr/bin/env python
from __future__ import print_function

import math
import os

# Silence the "Your CPU supports instructions that this TensorFlow binary was not
# compiled to use" warning; must be set before TensorFlow is imported.
# https://stackoverflow.com/questions/47068709/your-cpu-supports-instructions-that-this-tensorflow-binary-was-not-compiled-to-u
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data as mnist_data

print("TensorFlow version " + tf.__version__)
if __name__ == '__main__':
    mnist = mnist_data.read_data_sets("data", one_hot=True, reshape=False, validation_size=0)

    # input X: 28x28 grayscale images; the first dimension (None) indexes the images in the mini-batch
    X = tf.placeholder(tf.float32, [None, 28, 28, 1])
    # correct answers will go here
    Y_ = tf.placeholder(tf.float32, [None, 10])
    # dropout keep-probability (declared here but never applied below)
    pkeep = tf.placeholder(tf.float32)

    # convolutional weights, initialized with small random numbers rather than zeros
    # (update 1: zero-initialized weights would leave every unit computing the same thing)
    # note: these filters span the full feature map at each stage; small kernels (e.g. 5x5) are more common
    W = tf.Variable(tf.truncated_normal([28, 28, 1, 4], stddev=0.1))
    B = tf.Variable(tf.ones([4]) / 10)  # 4 is the number of output channels
    W1 = tf.Variable(tf.truncated_normal([14, 14, 4, 8], stddev=0.1))
    B1 = tf.Variable(tf.ones([8]) / 10)
    W2 = tf.Variable(tf.truncated_normal([7, 7, 8, 12], stddev=0.1))
    B2 = tf.Variable(tf.ones([12]) / 10)
    # fully connected layers
    W3 = tf.Variable(tf.truncated_normal([7 * 7 * 12, 200], stddev=0.1))
    B3 = tf.Variable(tf.ones([200]) / 10)
    W4 = tf.Variable(tf.truncated_normal([200, 10], stddev=0.1))
    B4 = tf.Variable(tf.ones([10]) / 10)

    # the model: strides 1, 2, 2 give 28x28, 14x14, 7x7 feature maps,
    # flattened into a 200-unit fully connected layer and a 10-way softmax readout
    Y1 = tf.nn.relu(tf.nn.conv2d(X, W, strides=[1, 1, 1, 1], padding='SAME') + B)
    Y2 = tf.nn.relu(tf.nn.conv2d(Y1, W1, strides=[1, 2, 2, 1], padding='SAME') + B1)
    Y3 = tf.nn.relu(tf.nn.conv2d(Y2, W2, strides=[1, 2, 2, 1], padding='SAME') + B2)
    YY = tf.reshape(Y3, shape=[-1, 7 * 7 * 12])
    Y4 = tf.nn.relu(tf.matmul(YY, W3) + B3)
    Ylogits = tf.matmul(Y4, W4) + B4
    Y = tf.nn.softmax(Ylogits)

    # cost function: cross-entropy computed on the logits for numerical stability,
    # scaled by 100 to make values easier to read on mini-batches of 100 images
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
    cross_entropy = tf.reduce_mean(cross_entropy) * 100

    # optimizer and train step
    # optimizer = tf.train.GradientDescentOptimizer(0.003)
    # train_step = optimizer.minimize(cross_entropy)
    lr = tf.placeholder(tf.float32)
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

    # accuracy calculation
    is_correct = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
    accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))

    init = tf.global_variables_initializer()
    session = tf.Session()
    session.run(init)

    for i in range(1000):
        # adaptive learning rate: exponential decay from max towards min
        max_learning_rate = 0.003
        min_learning_rate = 0.0001
        decay_speed = 2000.0  # 0.003 -> 0.0001 with decay 2000 => 0.9826 after 5000 iterations
        learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i / decay_speed)

        batch_X, batch_Y = mnist.train.next_batch(100)
        train_data = {X: batch_X, Y_: batch_Y, lr: learning_rate}
        session.run(train_step, feed_dict=train_data)
        a, c = session.run([accuracy, cross_entropy], feed_dict=train_data)
        print('Training accuracy:', a)

        if i % 100 == 0:
            test_data = {X: mnist.test.images, Y_: mnist.test.labels}
            a, c = session.run([accuracy, cross_entropy], feed_dict=test_data)
            print('Test accuracy:', a)
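Both scripts declare a pkeep placeholder for dropout but never feed or apply it. A minimal sketch of how it could be wired in, assuming dropout on the fully connected layer of the convolutional model above (the 0.75 keep-probability is an illustrative value, not from the original):

# hypothetical change, not in the original script
Y4 = tf.nn.relu(tf.matmul(YY, W3) + B3)
Y4d = tf.nn.dropout(Y4, pkeep)  # zeroes random units, scales survivors by 1/pkeep
Ylogits = tf.matmul(Y4d, W4) + B4

# in the training loop: dropout on while training, off while evaluating
train_data = {X: batch_X, Y_: batch_Y, lr: learning_rate, pkeep: 0.75}
test_data = {X: mnist.test.images, Y_: mnist.test.labels, pkeep: 1.0}

The second script below swaps the convolutional stack for a five-layer fully connected network; the optimizer, learning-rate decay, and accuracy calculation are unchanged.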
#!/usr/bin/env python
from __future__ import print_function

import math
import os

# Silence the "Your CPU supports instructions that this TensorFlow binary was not
# compiled to use" warning; must be set before TensorFlow is imported.
# https://stackoverflow.com/questions/47068709/your-cpu-supports-instructions-that-this-tensorflow-binary-was-not-compiled-to-u
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data as mnist_data

print("TensorFlow version " + tf.__version__)
if __name__ == '__main__':
    mnist = mnist_data.read_data_sets("data", one_hot=True, reshape=False, validation_size=0)

    # input X: 28x28 grayscale images; the first dimension (None) indexes the images in the mini-batch
    X = tf.placeholder(tf.float32, [None, 28, 28, 1])
    # correct answers will go here
    Y_ = tf.placeholder(tf.float32, [None, 10])
    # dropout keep-probability (declared here but never applied below)
    pkeep = tf.placeholder(tf.float32)

    # weights for five fully connected layers, 784 = 28*28 inputs,
    # initialized with small random numbers rather than zeros
    # (update 1: zero-initialized weights would leave every unit computing the same thing)
    W1 = tf.Variable(tf.truncated_normal([28 * 28, 200], stddev=0.1))
    B1 = tf.Variable(tf.zeros([200]))
    W2 = tf.Variable(tf.truncated_normal([200, 100], stddev=0.1))
    B2 = tf.Variable(tf.zeros([100]))
    W3 = tf.Variable(tf.truncated_normal([100, 60], stddev=0.1))
    B3 = tf.Variable(tf.zeros([60]))
    W4 = tf.Variable(tf.truncated_normal([60, 30], stddev=0.1))
    B4 = tf.Variable(tf.zeros([30]))
    W5 = tf.Variable(tf.truncated_normal([30, 10], stddev=0.1))
    B5 = tf.Variable(tf.zeros([10]))

    # the model: flatten the image, then four ReLU layers and a 10-way softmax readout
    XX = tf.reshape(X, [-1, 28 * 28])
    Y1 = tf.nn.relu(tf.matmul(XX, W1) + B1)
    Y2 = tf.nn.relu(tf.matmul(Y1, W2) + B2)
    Y3 = tf.nn.relu(tf.matmul(Y2, W3) + B3)
    Y4 = tf.nn.relu(tf.matmul(Y3, W4) + B4)
    Ylogits = tf.matmul(Y4, W5) + B5
    Y = tf.nn.softmax(Ylogits)

    # cost function: cross-entropy computed on the logits for numerical stability,
    # scaled by 100 to make values easier to read on mini-batches of 100 images
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
    cross_entropy = tf.reduce_mean(cross_entropy) * 100

    # optimizer and train step
    # optimizer = tf.train.GradientDescentOptimizer(0.003)
    # train_step = optimizer.minimize(cross_entropy)
    lr = tf.placeholder(tf.float32)
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

    # accuracy calculation
    is_correct = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
    accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))

    init = tf.global_variables_initializer()
    session = tf.Session()
    session.run(init)

    for i in range(5000):
        # adaptive learning rate: exponential decay from max towards min
        max_learning_rate = 0.003
        min_learning_rate = 0.0001
        decay_speed = 2000.0  # 0.003 -> 0.0001 with decay 2000 => 0.9826 after 5000 iterations
        learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i / decay_speed)

        batch_X, batch_Y = mnist.train.next_batch(100)
        train_data = {X: batch_X, Y_: batch_Y, lr: learning_rate}
        session.run(train_step, feed_dict=train_data)
        a, c = session.run([accuracy, cross_entropy], feed_dict=train_data)
        print('Training accuracy:', a)

        # note: unlike the convolutional script, this evaluates the full test set on every iteration
        test_data = {X: mnist.test.images, Y_: mnist.test.labels}
        a, c = session.run([accuracy, cross_entropy], feed_dict=test_data)
        print('Test accuracy:', a)
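The learning-rate schedule in both training loops is plain Python, independent of the TensorFlow graph, so its values can be checked standalone; a small sketch of the same exponential interpolation:

import math

def decayed_lr(i, max_lr=0.003, min_lr=0.0001, decay_speed=2000.0):
    # identical to the formula inside both training loops
    return min_lr + (max_lr - min_lr) * math.exp(-i / decay_speed)

for step in (0, 1000, 2000, 5000):
    print(step, round(decayed_lr(step), 6))
# -> 0 0.003, 1000 0.001859, 2000 0.001167, 5000 0.000338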