Created September 23, 2017 14:57
lines.py
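# Interactive gradient-descent line-fitting demo for Pythonista (iOS): tap the
# scene to scatter points, pick an optimizer with the switches, then tap the
# button to fit a line y = a*x + b. Uses Pythonista's scene, ui and speech
# modules and is written in Python 2 (print statements, xrange).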
from scene import *
import ui
import random
import math
import numpy as np
import speech
import time
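
# The three fitters below minimize the mean squared error of a line y = a*x + b
# over the tapped points, i.e. mean((a*x_i + b - y_i)**2). Its partial
# derivatives are mean((2*a*x_i + 2*b - 2*y_i) * x_i) with respect to a and
# mean(2*a*x_i + 2*b - 2*y_i) with respect to b, which is exactly what the
# partial_derivative_a / partial_derivative_b helpers compute.
#
# Batch gradient descent: every update uses all points. Coordinates are scaled
# down by 100 and the returned intercept is scaled back up. Every 100
# iterations the current fit is drawn as a white line on the scene and the
# iteration count is spoken aloud.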
def gradient_descent_line_fitter(x, y, learning_rate = 0.02, stop_criteria = 1e-5, max_iter = 10000):
    scaling_factor = 100.0
    x = x/scaling_factor
    y = y/scaling_factor

    def partial_derivative_b(a, b, x, y):
        return np.mean(2 * a * x + 2 * b - 2 * y)

    def partial_derivative_a(a, b, x, y):
        return np.mean((2 * a * x + 2 * b - 2 * y) * x)

    a = 0
    b = 0
    for i in xrange(max_iter):
        old_a = a
        old_b = b
        a += -learning_rate * partial_derivative_a(old_a, old_b, x, y)
        b += -learning_rate * partial_derivative_b(old_a, old_b, x, y)
        if i % 100 == 0 and i != 0:
            print 'a = ' + str(a) + ', b = ' + str(b*scaling_factor)
            line_path = ui.Path()
            line_path.move_to(0, 0)
            line_path.line_to(-1024, a*1024)
            line_path.line_width = 2
            test_line = ShapeNode(path=line_path, fill_color='white', stroke_color='white')
            test_line.position = (512, b*scaling_factor + a*512)
            if view.scene.line:
                view.scene.line.remove_from_parent()
            view.scene.add_child(test_line)
            view.scene.line = test_line
            i_num = str(i)
            speech.say('jumped ' + i_num + ' times', 'en-US', 0.6)
            time.sleep(0.8)
        if abs(a - old_a) < stop_criteria and abs(b - old_b) < stop_criteria:
            break
    return (a, b*scaling_factor)
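
# Stochastic gradient descent: the points are reshuffled each epoch and the
# parameters are updated one point at a time; the current epoch is cut short
# once a single update changes a and b by less than stop_criteria.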
def stochastic_gradient_descent_line_fitter(x, y, learning_rate = 0.0001, stop_criteria = 1e-5, epochs = 5):
    scaling_factor = max(np.max(x), np.max(y))/10.0
    x_scaled = x/scaling_factor
    y_scaled = y/scaling_factor

    def partial_derivative_b(a, b, x, y):
        return 2*a*x + 2*b - 2*y

    def partial_derivative_a(a, b, x, y):
        return (2*a*x + 2*b - 2*y)*x

    num_points = len(x)
    a = 0
    b = 0
    xy_arr = np.array([x_scaled, y_scaled]).T
    for epoch in range(epochs):
        np.random.shuffle(xy_arr)
        shuffled_x = xy_arr[:, 0]
        shuffled_y = xy_arr[:, 1]
        old_batch_a = a
        old_batch_b = b
        for i in xrange(num_points):
            old_a = a
            old_b = b
            this_point_x = shuffled_x[i]
            this_point_y = shuffled_y[i]
            a += -learning_rate*partial_derivative_a(old_a, old_b, this_point_x, this_point_y)
            b += -learning_rate*partial_derivative_b(old_a, old_b, this_point_x, this_point_y)
            if abs(old_a - a) < stop_criteria and abs(old_b - b) < stop_criteria:
                break
    return (a, b*scaling_factor)
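
# Mini-batch stochastic gradient descent: each epoch shuffles the points and
# updates the parameters on consecutive batches of batch_size points, printing
# the current fit after every batch and stopping once a whole epoch changes a
# and b by less than stop_criteria.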
def mini_batch_stochastic_gradient_descent_line_fitter(x, y, learning_rate = 0.01, stop_criteria = 1e-5, epochs = 10, batch_size = 100):
    scaling_factor = max(np.max(x), np.max(y))/10.0
    x_scaled = x/scaling_factor
    y_scaled = y/scaling_factor

    def partial_derivative_b(a, b, x, y):
        return np.mean(2*a*x + 2*b - 2*y)

    def partial_derivative_a(a, b, x, y):
        return np.mean((2*a*x + 2*b - 2*y)*x)

    num_points = len(x)
    a = 0
    b = 0
    xy_arr = np.array([x_scaled, y_scaled]).T
    for epoch in range(epochs):
        np.random.shuffle(xy_arr)
        shuffled_x = xy_arr[:, 0]
        shuffled_y = xy_arr[:, 1]
        old_batch_a = a
        old_batch_b = b
        for i in xrange(num_points/batch_size):
            old_a = a
            old_b = b
            batch_start_index = i*batch_size
            batch_end_index = (i+1)*batch_size
            this_batch_x = shuffled_x[batch_start_index:batch_end_index]
            this_batch_y = shuffled_y[batch_start_index:batch_end_index]
            a += -learning_rate*partial_derivative_a(old_a, old_b, this_batch_x, this_batch_y)
            b += -learning_rate*partial_derivative_b(old_a, old_b, this_batch_x, this_batch_y)
            print a, b*scaling_factor
        if abs(old_batch_a - a) < stop_criteria and abs(old_batch_b - b) < stop_criteria:
            break
    return (a, b*scaling_factor)
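
# Scene: shows a title label until the first tap; each tap below y = 630
# scatters 100 random points around the touch location, draws them as white
# dots, and records their coordinates in the module-level `points` list.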
points = []


class MyScene (Scene):
    def setup(self):
        self.background_color = 'midnightblue'
        self.point_size = 8
        self.bg_text = LabelNode('Gradient Descent Optimization', font=('Helvetica', 60))
        self.bg_text.position = self.size / 2
        self.bg_text.color = (.5, .5, .5)
        self.add_child(self.bg_text)
        self.line = None

    def touch_began(self, touch):
        x, y = touch.location
        if y > 630:
            return
        if self.bg_text:
            self.bg_text.remove_from_parent()
        for i in range(100):
            dist = random.uniform(0, 100)
            angle = random.uniform(0, 2*math.pi)
            rand_x = x + dist*math.cos(angle)
            rand_y = y + dist*math.sin(angle)
            points.append((rand_x, rand_y))
            this_point = ShapeNode(path=ui.Path.oval(0, 0, self.point_size, self.point_size), fill_color='white', stroke_color='clear')
            this_point.position = (rand_x, rand_y)
            self.add_child(this_point)
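
# Button callback (runs off the UI thread via ui.in_background): converts the
# collected points to numpy arrays, runs whichever optimizer's switch is on,
# then draws the fitted line in red and announces completion.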
@ui.in_background
def button_tapped(sender):
    if len(points) <= 0:
        print('gimme points')
        return
    x = []
    y = []
    for point in points:
        x.append(point[0])
        y.append(point[1])
    np_x = np.array(x, dtype=np.float64)
    np_y = np.array(y, dtype=np.float64)
    if switch_gd.value:
        (a, b) = gradient_descent_line_fitter(np_x, np_y)
    elif switch_sgd.value:
        (a, b) = stochastic_gradient_descent_line_fitter(np_x, np_y)
    else:
        (a, b) = mini_batch_stochastic_gradient_descent_line_fitter(np_x, np_y)
    print 'a = ' + str(a) + ', b = ' + str(b)
    line_path = ui.Path()
    line_path.move_to(0, 0)
    line_path.line_to(-1024, a*1024)
    line_path.line_width = 3
    test_line = ShapeNode(path=line_path, fill_color='red', stroke_color='red')
    test_line.position = (512, b + a*512)
    view.scene.add_child(test_line)
    speech.say('I am done!')
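
# Build the SceneView and the 'Find Line of Best Fit' button.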
view = SceneView()
view.scene = MyScene()

button = ui.Button(title='Find Line of Best Fit')
button.tint_color = (.0, .0, .0)
button.background_color = (.72, .67, 1.0)
button.corner_radius = 5
button.border_width = 1
button.border_color = (0.6, 0.6, 0.6)
button.width = 200
button.x = 400
button.y = 700
button.action = button_tapped
view.add_subview(button)
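
# Switch callbacks: the three switches act like radio buttons. Turning one on
# turns the other two off, and the active switch cannot be toggled off directly.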
def switch_sgd_action(sender):
    if sender.value == False:
        sender.value = True
    else:
        switch_gd.value = False
        switch_mbsgd.value = False


def switch_gd_action(sender):
    if sender.value == False:
        sender.value = True
    else:
        switch_sgd.value = False
        switch_mbsgd.value = False


def switch_mbsgd_action(sender):
    if sender.value == False:
        sender.value = True
    else:
        switch_gd.value = False
        switch_sgd.value = False
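
# Create the three switches and their labels; batch gradient descent is the
# default (switch_gd starts on).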
switch_sgd = ui.Switch()
switch_sgd.x = 600
switch_sgd.y = 60
switch_sgd.value = False
switch_sgd.action = switch_sgd_action

sgd_label = ui.Label()
sgd_label.text = 'Stochastic Gradient Descent:'
sgd_label.font = ('Chalkboard SE', 30)
sgd_label.text_color = (1, 1, 1)
sgd_label.width = 600
sgd_label.x = 50
sgd_label.y = 20

view.add_subview(switch_sgd)
view.add_subview(sgd_label)
switch_gd = ui.Switch()
switch_gd.x = 600
switch_gd.y = 20
switch_gd.value = True
switch_gd.action = switch_gd_action

gd_label = ui.Label()
gd_label.text = 'Batch Gradient Descent:'
gd_label.font = ('Chalkboard SE', 30)
gd_label.text_color = (1, 1, 1)
gd_label.width = 600
gd_label.x = 50
gd_label.y = -20

view.add_subview(switch_gd)
view.add_subview(gd_label)

switch_mbsgd = ui.Switch()
switch_mbsgd.x = 600
switch_mbsgd.y = 100
switch_mbsgd.value = False
switch_mbsgd.action = switch_mbsgd_action

mbsgd_label = ui.Label()
mbsgd_label.text = 'MiniBatch Stochastic Gradient Descent:'
mbsgd_label.font = ('Chalkboard SE', 30)
mbsgd_label.text_color = (1, 1, 1)
mbsgd_label.width = 600
mbsgd_label.x = 50
mbsgd_label.y = 60

view.add_subview(switch_mbsgd)
view.add_subview(mbsgd_label)

view.present()