Created
August 20, 2019 07:42
-
-
Save njanirudh/0dda0bec24b840ab18c6652f753d5404 to your computer and use it in GitHub Desktop.
OpenAI Gym
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
### OpenAI GYM ###
# References:
# 1. https://gym.openai.com/docs/
# 2. https://gym.openai.com/envs/#board_game
import gym

# Build the classic CartPole balancing environment.
# print(envs.registry.all())
env = gym.make('CartPole-v0')
obs = env.reset()

# Inspect the discrete action space of the environment.
print(env.action_space)
# Inspect the observation space ('Box' implies the array size [1]).
print(env.observation_space)
# Upper and lower bounds of each observation dimension [1].
print(env.observation_space.high)
print(env.observation_space.low)

# Run the environment for a fixed number of timesteps.
for _step in range(1000):
    env.render()
    # Random agent: sample an action uniformly from the action space.
    act = env.action_space.sample()
    obs, reward, done, info = env.step(act)
    # Reward produced by the step just taken.
    print(reward)
    if done:
        # Episode finished — start a fresh one.
        obs = env.reset()
env.close()
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.