ant_goal.py
import random

import numpy as np

from environments.mujoco.ant import AntEnv


class AntGoalEnv(AntEnv):
    def __init__(self, max_episode_steps=200):
        self.set_task(self.sample_tasks(1)[0])
        self._max_episode_steps = max_episode_steps
        self.task_dim = 2
        super(AntGoalEnv, self).__init__()

    def step(self, action):
        self.do_simulation(action, self.frame_skip)
        xposafter = np.array(self.get_body_com("torso"))

        # negative L1 distance to the goal (bounded above by 0, so the agent
        # is not rewarded for terminating early)
        goal_reward = -np.sum(np.abs(xposafter[:2] - self.goal_pos))

        ctrl_cost = .1 * np.square(action).sum()
        contact_cost = 0.5 * 1e-3 * np.sum(
            np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
        survive_reward = 0.0
        reward = goal_reward - ctrl_cost - contact_cost + survive_reward
        state = self.state_vector()
        done = False
        ob = self._get_obs()
        return ob, reward, done, dict(
            goal_forward=goal_reward,
            reward_ctrl=-ctrl_cost,
            reward_contact=-contact_cost,
            reward_survive=survive_reward,
            task=self.get_task()
        )

    def sample_tasks(self, num_tasks):
        # sample goal positions uniformly over a disc of radius 3:
        # r = 3 * sqrt(u) with u ~ U(0, 1) gives a density uniform in area
        a = np.array([random.random() for _ in range(num_tasks)]) * 2 * np.pi
        r = 3 * np.array([random.random() for _ in range(num_tasks)]) ** 0.5
        return np.stack((r * np.cos(a), r * np.sin(a)), axis=-1)

    def set_task(self, task):
        self.goal_pos = task

    def get_task(self):
        return np.array(self.goal_pos)

    def _get_obs(self):
        return np.concatenate([
            self.sim.data.qpos.flat,
            self.sim.data.qvel.flat,
            np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
        ])


class AntGoalOracleEnv(AntGoalEnv):
    # same as AntGoalEnv, but the goal position is appended to the observation
    def _get_obs(self):
        return np.concatenate([
            self.sim.data.qpos.flat,
            self.sim.data.qvel.flat,
            np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
            self.goal_pos,
        ])
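
A minimal usage sketch, for orientation only: it assumes a working MuJoCo setup, that this file is importable as environments.mujoco.ant_goal (an assumed module path), and that AntEnv exposes the usual gym-style reset/step/action_space API. The resample-on-reset loop below is illustrative and not part of this file.

    # Illustrative sketch; module path and gym-style API are assumptions.
    from environments.mujoco.ant_goal import AntGoalEnv

    env = AntGoalEnv(max_episode_steps=200)
    env.set_task(env.sample_tasks(1)[0])   # draw a fresh goal from the radius-3 disc
    obs = env.reset()
    for _ in range(env._max_episode_steps):
        obs, reward, done, info = env.step(env.action_space.sample())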