Skip to content

Instantly share code, notes, and snippets.

@sxjscience
Last active July 20, 2020 02:44
Show Gist options
  • Save sxjscience/afd0246559db06d4e639b4dff9111472 to your computer and use it in GitHub Desktop.
import numpy as np
import autogluon as ag
import ray
from ray import tune
import mxnet as mx
from mxnet.gluon import nn, Trainer
from mxnet.util import use_np
def get_mxnet_visible_gpus():
    """Return the MXNet contexts of all GPUs visible to MXNet.

    GPUs are probed one at a time: a tiny array is allocated on
    ``mx.gpu(i)`` and synchronized; the first device that fails marks the
    end of the visible range.

    Returns
    -------
    ctx_l : list
        One ``mx.gpu(i)`` context per visible GPU (empty list if none).
    """
    gpu_count = 0
    while True:
        try:
            # Allocation alone is lazy; asnumpy() forces a device sync so
            # an invalid/unavailable device id actually raises here.
            arr = mx.np.array(1.0, ctx=mx.gpu(gpu_count))
            arr.asnumpy()
        except Exception:  # broad on purpose: any failure ends the probe
            break
        gpu_count += 1
    return [mx.gpu(i) for i in range(gpu_count)]
@use_np
class Net:
    """Holds the Ray Tune trainable. ``train_fn`` is passed to ``tune.run``
    and trains a small MLP to reconstruct its own random input (squared-error
    reconstruction objective) across all GPUs visible to the trial."""

    def train_fn(self, args, reporter):
        # Fix seeds inside the trial so each trial's training run is
        # reproducible (hyperparameters were already sampled by the scheduler).
        np.random.seed(123)
        mx.random.seed(123)
        # Discover the GPUs Ray granted this trial (resources_per_trial).
        gpu_ctx_l = get_mxnet_visible_gpus()
        print(gpu_ctx_l)
        # Small MLP: Dense(num_hidden) -> ReLU -> Dense(4); output dim 4
        # matches the (8, 4) input batches generated below.
        net = nn.HybridSequential()
        net.add(nn.Dense(args['num_hidden']))
        net.add(nn.Activation('relu'))
        net.add(nn.Dense(4))
        net.hybridize()
        # Parameters are replicated across all discovered GPU contexts.
        net.initialize(ctx=gpu_ctx_l)
        trainer = Trainer(net.collect_params(), 'adam', {'learning_rate': args['lr']})
        for i in range(10):
            # Forward pass recorded per-GPU; one loss per device.
            with mx.autograd.record():
                loss_l = []
                for ctx in gpu_ctx_l:
                    data = mx.np.random.normal(0, 1, (8, 4), ctx=ctx)
                    out = net(data)
                    # Reconstruction loss: network should reproduce its input.
                    loss = mx.np.square(out - data).sum()
                    loss_l.append(loss)
            # Backward outside the record scope; gradients from each device
            # accumulate before the single optimizer step below.
            for loss in loss_l:
                loss.backward()
            # asnumpy() synchronizes, so the reported loss is final for this step.
            sum_loss = sum([loss.asnumpy() for loss in loss_l])
            reporter(loss=sum_loss, iteration=i)
            # rescale_grad normalization of 1.0 — gradients are summed, not averaged.
            trainer.step(1.0)
        return net
# Hyperparameter search space — each trial draws a fresh hidden width and
# learning rate via sample_from.
search_space = {
    'num_hidden': tune.sample_from(lambda _: np.random.randint(16, 32)),
    'lr': tune.sample_from(lambda _: np.random.uniform(1e-3, 1e-2)),
}

net = Net()
# Launch 16 trials; Ray grants each trial two GPUs, which train_fn then
# discovers at runtime.
analysis = tune.run(
    net.train_fn,
    config=search_space,
    num_samples=16,
    resources_per_trial={'gpu': 2},
)
print(analysis.dataframe())
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment