train_baselines.py
import os
import time

import gym
from gym.spaces import Box
import numpy as np
import tensorflow as tf

from sim_env import SimEnv, FeedbackNormalizedSimEnv
#from sim_env_nodrone import SimEnv, FeedbackNormalizedSimEnv

from stable_baselines import PPO2, A2C
from stable_baselines.bench import Monitor
from stable_baselines.common import set_global_seeds
from stable_baselines.common.callbacks import BaseCallback
from stable_baselines.common.policies import FeedForwardPolicy, MlpLstmPolicy, MlpPolicy
from stable_baselines.common.vec_env import SubprocVecEnv, DummyVecEnv
from stable_baselines.results_plotter import load_results, ts2xy

log_dir = "/auto/homes/jb2270/gym/0005"
def make_env(rank, seed=0):
    """Return a thunk that builds a seeded SimEnv, for use with SubprocVecEnv."""
    def _init():
        env = SimEnv({
            'render': False,
            'ep_end_after_n_waypoints': 200,
            'max_timesteps_between_checkpoints': 2000,
            'dist_waypoint_abort_ep': 2,
            'minimum_drone_height': 0.2,
            'dist_waypoint_proceed': 0.8,
        })
        # No monitor file is written (filename=None); pass a path under log_dir here
        # if a callback needs to read episode statistics via load_results().
        env = Monitor(env, None, allow_early_resets=True)
        env.seed(seed + rank)
        return env
    set_global_seeds(seed)
    return _init

class SaveOnBestTrainingRewardCallback(BaseCallback):
    """
    Callback that saves the model whenever the mean training reward over the last
    100 episodes improves (the check is done every ``check_freq`` steps). In
    practice, Stable Baselines recommends using ``EvalCallback`` instead.

    :param check_freq: (int) How often (in steps) to check the training reward.
    :param log_dir: (str) Path to the folder where the model will be saved.
        It must contain the file created by the ``Monitor`` wrapper.
    :param verbose: (int)
    """
    def __init__(self, check_freq: int, log_dir: str, verbose=1):
        super(SaveOnBestTrainingRewardCallback, self).__init__(verbose)
        self.check_freq = check_freq
        self.log_dir = log_dir
        self.save_path = os.path.join(log_dir, 'best_model')
        self.best_mean_reward = -np.inf

    def _init_callback(self) -> None:
        # Create the save folder if needed
        if self.save_path is not None:
            os.makedirs(self.save_path, exist_ok=True)

    def _on_step(self) -> bool:
        if self.n_calls % self.check_freq == 0:
            # Retrieve training reward from the Monitor logs in log_dir
            x, y = ts2xy(load_results(self.log_dir), 'timesteps')
            if len(x) > 0:
                # Mean training reward over the last 100 episodes
                mean_reward = np.mean(y[-100:])
                if self.verbose > 0:
                    print("Num timesteps: {}".format(self.num_timesteps))
                    print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(
                        self.best_mean_reward, mean_reward))
                # New best model: save the agent
                if mean_reward > self.best_mean_reward:
                    self.best_mean_reward = mean_reward
                    if self.verbose > 0:
                        print("Saving new best model to {}".format(self.save_path))
                    self.model.save(self.save_path)
        return True
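
# A minimal usage sketch (not part of the original run): SaveOnBestTrainingRewardCallback
# only works if Monitor actually writes its episode log into log_dir, because
# load_results(log_dir) parses that CSV. The filename "monitor_0" and the shortened
# config are assumptions for illustration only.
#
#   env = Monitor(SimEnv(config), os.path.join(log_dir, "monitor_0"),  # config as in make_env
#                 allow_early_resets=True)
#   model = PPO2(CustomPolicy, env, verbose=1)
#   model.learn(total_timesteps=100000,
#               callback=SaveOnBestTrainingRewardCallback(check_freq=1000, log_dir=log_dir))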

class SavePeriodicCallback(BaseCallback):
    """
    Callback that unconditionally saves the model every ``check_freq`` steps
    (note: despite the path name 'best_model', it does not track the best reward).

    :param check_freq: (int) How often (in steps) to save the model.
    :param log_dir: (str) Path to the folder where the model will be saved.
    :param verbose: (int)
    """
    def __init__(self, check_freq: int, log_dir: str, verbose=1):
        super(SavePeriodicCallback, self).__init__(verbose)
        self.check_freq = check_freq
        self.log_dir = log_dir
        self.save_path = os.path.join(log_dir, 'best_model')

    def _init_callback(self) -> None:
        # Create the save folder if needed
        if self.save_path is not None:
            os.makedirs(self.save_path, exist_ok=True)

    def _on_step(self) -> bool:
        if self.n_calls % self.check_freq == 0:
            self.model.save(self.save_path)
        return True

# Custom MLP policy: four hidden layers of 128 units each
class CustomPolicy(FeedForwardPolicy):
    def __init__(self, *args, **kwargs):
        super(CustomPolicy, self).__init__(*args, **kwargs,
                                           # act_fun=tf.nn.tanh,
                                           net_arch=[dict(pi=[128, 128, 128, 128],
                                                          vf=[128, 128, 128, 128])],
                                           feature_extraction="mlp")
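
# Note on the architecture above: with this net_arch there are no shared layers; the
# policy (pi) and value function (vf) each get their own stack of four 128-unit fully
# connected layers. A shared trunk would instead be written as, for example,
# net_arch=[128, 128, dict(pi=[128], vf=[128])].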

def train():
    env = SubprocVecEnv([make_env(i) for i in range(16)])
    #env = VecMonitor(env, log_dir + "/monitor.csv")
    #env = make_env(0)()
    callback = SavePeriodicCallback(check_freq=1000, log_dir=log_dir)
    model = PPO2(CustomPolicy, env, verbose=1, nminibatches=8, n_steps=32, cliprange=0.2, gamma=0.99)
    #model = A2C(CustomPolicy, env, verbose=1, n_steps=32, gamma=0.95)
    model.learn(total_timesteps=int(100e6), callback=callback)
    model.save(os.path.join(log_dir, "model"))
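
# Rollout/minibatch arithmetic for the PPO2 settings above: 16 parallel envs x
# n_steps=32 gives 512 transitions per update, split into nminibatches=8 minibatches
# of 64 samples each. PPO2 requires (n_envs * n_steps) to be divisible by nminibatches.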

def run():
    model = PPO2.load("./results/best_model_nodrone")
    env = make_env(0)()  # or SubprocVecEnv([make_env(i) for i in range(1)])
    obs = env.reset()
    cum_rew = 0
    for i in range(10000):
        action, _states = model.predict(obs)
        obs, rewards, dones, info = env.step(action)
        #env.render()
        cum_rew += rewards
        print(obs, rewards, cum_rew, dones)
        if dones:
            print("Done", cum_rew)
            break
    env.close()

if __name__ == '__main__':
    #run()
    train()
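
# Hedged note: the periodic checkpoints from train() are written by SavePeriodicCallback
# to os.path.join(log_dir, 'best_model'), so one would typically resume or evaluate that
# run with something like:
#
#   model = PPO2.load(os.path.join(log_dir, "best_model"))
#
# run() above instead loads a separately produced checkpoint from ./results/.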