run_MountainCar.py
"""
Deep Q network,
Using:
Tensorflow: 1.0
gym: 0.8.0
"""
import gym
from RL_brain import DeepQNetwork
env = gym.make('MountainCar-v0')
env = env.unwrapped
print(env.action_space)
print(env.observation_space)
print(env.observation_space.high)
print(env.observation_space.low)
RL = DeepQNetwork(n_actions=3, n_features=2, learning_rate=0.001, e_greedy=0.9,
replace_target_iter=300, memory_size=3000,
e_greedy_increment=0.0002,)
total_steps = 0
for i_episode in range(10):
observation = env.reset()
ep_r = 0
while True:
env.render()
action = RL.choose_action(observation)
observation_, reward, done, info = env.step(action)
position, velocity = observation_
# the higher the better
reward = abs(position - (-0.5)) # r in [0, 1]
RL.store_transition(observation, action, reward, observation_)
if total_steps > 1000:
RL.learn()
ep_r += reward
if done:
get = '| Get' if observation_[0] >= env.unwrapped.goal_position else '| ----'
print('Epi: ', i_episode,
get,
'| Ep_r: ', round(ep_r, 4),
'| Epsilon: ', round(RL.epsilon, 2))
break
observation = observation_
total_steps += 1
RL.plot_cost()
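
# ----------------------------------------------------------------------------
# The DeepQNetwork class imported above lives in RL_brain.py (a TensorFlow 1.x
# implementation, not shown here). Below is a minimal, self-contained sketch of
# the interface this script depends on. It is NOT the repository's code: as an
# assumption, it substitutes a tiny numpy linear Q-function for the neural
# network, purely to illustrate the constructor parameters and the four methods
# (choose_action, store_transition, learn, plot_cost) used in the loop above.
import numpy as np

class DeepQNetworkSketch:
    def __init__(self, n_actions, n_features, learning_rate=0.001,
                 reward_decay=0.9, e_greedy=0.9, replace_target_iter=300,
                 memory_size=3000, batch_size=32, e_greedy_increment=None):
        self.n_actions, self.n_features = n_actions, n_features
        self.lr, self.gamma = learning_rate, reward_decay
        self.epsilon_max = e_greedy
        self.replace_target_iter = replace_target_iter
        self.memory_size, self.batch_size = memory_size, batch_size
        self.epsilon_increment = e_greedy_increment
        # epsilon anneals up from 0 when an increment is given, else stays fixed
        self.epsilon = 0 if e_greedy_increment is not None else e_greedy
        # each transition row is (s, a, r, s_): n_features + 1 + 1 + n_features
        self.memory = np.zeros((memory_size, n_features * 2 + 2))
        self.memory_counter = 0
        self.learn_step_counter = 0
        # linear "eval" and "target" Q-functions: Q(s) = s @ W + b
        self.W_eval = np.zeros((n_features, n_actions))
        self.b_eval = np.zeros(n_actions)
        self.W_target, self.b_target = self.W_eval.copy(), self.b_eval.copy()
        self.cost_his = []

    def choose_action(self, observation):
        # epsilon-greedy: exploit with probability epsilon, otherwise explore
        if np.random.uniform() < self.epsilon:
            q = observation @ self.W_eval + self.b_eval
            return int(np.argmax(q))
        return np.random.randint(0, self.n_actions)

    def store_transition(self, s, a, r, s_):
        index = self.memory_counter % self.memory_size  # ring buffer
        self.memory[index, :] = np.hstack((s, [a, r], s_))
        self.memory_counter += 1

    def learn(self):
        # periodically sync the target parameters with the eval parameters
        if self.learn_step_counter % self.replace_target_iter == 0:
            self.W_target = self.W_eval.copy()
            self.b_target = self.b_eval.copy()
        # sample a random minibatch from whatever memory is filled so far
        n = min(self.memory_counter, self.memory_size)
        batch = self.memory[np.random.choice(n, size=self.batch_size)]
        s = batch[:, :self.n_features]
        a = batch[:, self.n_features].astype(int)
        r = batch[:, self.n_features + 1]
        s_ = batch[:, -self.n_features:]
        q_next = s_ @ self.W_target + self.b_target
        q_eval = s @ self.W_eval + self.b_eval
        # Q-learning target: r + gamma * max_a' Q_target(s', a')
        target = r + self.gamma * q_next.max(axis=1)
        td_error = q_eval[np.arange(self.batch_size), a] - target
        self.cost_his.append(float(np.mean(td_error ** 2)))
        # gradient step on the chosen actions only
        grad_W = np.zeros_like(self.W_eval)
        grad_b = np.zeros_like(self.b_eval)
        for i in range(self.batch_size):
            grad_W[:, a[i]] += td_error[i] * s[i]
            grad_b[a[i]] += td_error[i]
        self.W_eval -= self.lr * grad_W / self.batch_size
        self.b_eval -= self.lr * grad_b / self.batch_size
        # anneal epsilon toward epsilon_max
        if self.epsilon_increment is not None and self.epsilon < self.epsilon_max:
            self.epsilon += self.epsilon_increment
        self.learn_step_counter += 1

    def plot_cost(self):
        import matplotlib.pyplot as plt
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.ylabel('Cost')
        plt.xlabel('training steps')
        plt.show()

# Swapping `from RL_brain import DeepQNetwork` for this sketch (renamed
# accordingly) would run the training loop above end to end, though a linear
# Q-function learns far more slowly on MountainCar than the real network.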