# coding: utf-8

import numpy as np
import pandas as pd
import tensorflow as tf
import random
import math
import tempfile
import time
import os
import gc

from sklearn.model_selection import KFold, cross_val_score

FEAT_COUNT = 57
CONTINUOUS_COLUMNS = ['V' + str(num) for num in range(FEAT_COUNT)]
LABEL_COLUMN = 'V' + str(FEAT_COUNT)
cols = CONTINUOUS_COLUMNS + [LABEL_COLUMN]

# random toy dataset: 86 rows of features + label, values in [-1, 1)
df = pd.DataFrame(np.random.randint(-100, 100, size=(86, FEAT_COUNT + 1)) / 100,
                  columns=cols)

deep_columns = [tf.contrib.layers.real_valued_column(column_name=col)
                for col in CONTINUOUS_COLUMNS]

random.seed(a=1337)
k_fold = KFold(n_splits=10)

# accumulate results across experiments
resultz = []
layerz = []
stepz = []

# let's run 100 experiments
for i in range(100):
    print("EXPERIMENT " + str(i))
    resultzz = []

    # create a random network topology
    random.seed(a=1337 + i)
    num_layers = random.randint(2, 5)
    layer_sizes = [random.randint(2, 1024) for _ in range(num_layers)]
    layerz.append(layer_sizes)

    # train for a random number of steps
    step = random.randint(10, 1000)
    stepz.append(step)

    j = 0
    # 10-fold cross-validation
    for train_indices, test_indices in k_fold.split(df):
        # save each model in its own dir to avoid collisions across experiments
        # (makedirs also creates ./models on the first run)
        model_dir = './models/m' + str(i) + '-' + str(j)
        os.makedirs(model_dir)

        def input_fn_train():
            return ({k: tf.constant(df.iloc[train_indices][k].values,
                                    shape=[df.iloc[train_indices][k].size, 1])
                     for k in CONTINUOUS_COLUMNS},
                    tf.constant(df.iloc[train_indices][LABEL_COLUMN].values))

        def input_fn_eval():
            return ({k: tf.constant(df.iloc[test_indices][k].values,
                                    shape=[df.iloc[test_indices][k].size, 1])
                     for k in CONTINUOUS_COLUMNS},
                    tf.constant(df.iloc[test_indices][LABEL_COLUMN].values))

        m = tf.contrib.learn.DNNRegressor(model_dir=model_dir,
                                          feature_columns=deep_columns,
                                          hidden_units=layer_sizes)
        # train
        m.fit(input_fn=input_fn_train, steps=step)
        time.sleep(1)
        # eval
        results = m.evaluate(input_fn=input_fn_eval, steps=1)

        # desperate and possibly useless attempt to do a bit of cleanup
        del m
        gc.collect()

        # append current fold eval result to experiment results
        resultzz.append(results['loss'])
        j = j + 1

    # append experiment results to global results tracker
    resultz.append(resultzz)
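
# --- Not part of the original gist: a minimal sketch of how one might summarize
# --- the random search afterwards. It assumes only the resultz / layerz / stepz
# --- lists populated above and ranks topologies by mean cross-validation loss.
summary = pd.DataFrame({'layers': layerz,
                        'steps': stepz,
                        'mean_loss': [np.mean(r) for r in resultz],
                        'std_loss': [np.std(r) for r in resultz]})
print(summary.sort_values('mean_loss').head(10))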