Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix main #3

Merged
merged 3 commits into from
May 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,5 @@ install
.vscode
__pycache__
.env
IsaacSim-ros_workspaces/build_ws
IsaacSim-ros_workspaces/build_ws
envs
22 changes: 21 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,20 @@ Real-time RTX lidar stream:
</p>


Custom envs (Office):

<p align="center">
<img width="1200" height="440" src="https://github.com/abizovnuralem/go2_omniverse/assets/33475993/e2e9bdd0-1f40-41a8-86bc-c1097ab3fd7b" alt='Go2'>
</p>


Custom envs (Warehouse):

<p align="center">
<img width="1200" height="440" src="https://github.com/abizovnuralem/go2_omniverse/assets/33475993/5db6f331-60be-40bd-9b4b-ead44064ee44" alt='Go2'>
</p>


## Project RoadMap:
1. PPO balancing algorithm :white_check_mark:
2. Keyboard real time control :white_check_mark:
Expand All @@ -49,7 +63,7 @@ Real-time RTX lidar stream:
7. Foot force data stream :white_check_mark:
8. Real-time control from ROS2
9. Nav2 with Slam_toolbox
10. Bunch of RL-envs for custom dog training
10. Bunch of RL-envs for custom dog training :white_check_mark:

## Your feedback and support mean the world to us.

Expand Down Expand Up @@ -105,6 +119,12 @@ You can control the dog using "WASD" keyboard commands

You can use https://github.com/abizovnuralem/go2_ros2_sdk or https://github.com/unitreerobotics/unitree_ros2 as a foundation for your ROS2 setup.


## Select custom env

To use the predefined custom envs, you need to download the files from https://drive.google.com/drive/folders/1vVGuO1KIX1K6mD6mBHDZGm9nk2vaRyj3?usp=sharing and place them in the /envs folder.
Then you can load an environment via the `python main.py --custom_env=office` or `python main.py --custom_env=warehouse` commands.

## Easy start

For your convenience I wrote two launch files: run_orbit.sh and build_and_run_orbit.sh
Expand Down
44 changes: 26 additions & 18 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@
parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default="Isaac-Velocity-Rough-Unitree-Go2-v0", help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
parser.add_argument("--custom_env", type=str, default="office", help="Setup the environment")


# append RSL-RL cli arguments
Expand Down Expand Up @@ -81,7 +82,7 @@
import usdrt.Sdf


from omni.isaac.orbit_tasks.utils import get_checkpoint_path, parse_env_cfg
from omni.isaac.orbit_tasks.utils import get_checkpoint_path
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlVecEnvWrapper
Expand Down Expand Up @@ -117,7 +118,6 @@

from omnigraph import create_front_cam_omnigraph
from agent_cfg import unitree_go2_agent_cfg
from terrain_cfg import ROUGH_TERRAINS_CFG


import rclpy
Expand All @@ -136,22 +136,10 @@ class MySceneCfg(InteractiveSceneCfg):
# ground terrain
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="generator",
terrain_generator=ROUGH_TERRAINS_CFG,
max_init_terrain_level=5,
collision_group=-1,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
),
visual_material=sim_utils.MdlFileCfg(
mdl_path="{NVIDIA_NUCLEUS_DIR}/Materials/Base/Architecture/Shingles_01.mdl",
project_uvw=True,
),
terrain_type="plane",
debug_vis=False,
)

# robots
robot: ArticulationCfg = MISSING

Expand Down Expand Up @@ -317,7 +305,8 @@ class EventCfg:
@configclass
class CurriculumCfg:
"""Curriculum terms for the MDP."""
terrain_levels = CurrTerm(func=mdp.terrain_levels_vel)
pass
# terrain_levels = CurrTerm(func=mdp.terrain_levels_vel)

@configclass
class ViewerCfg:
Expand Down Expand Up @@ -351,7 +340,7 @@ class LocomotionVelocityRoughEnvCfg(RLTaskEnvCfg):
rewards: RewardsCfg = RewardsCfg()
terminations: TerminationsCfg = TerminationsCfg()
events: EventCfg = EventCfg()
curriculum: CurriculumCfg = CurriculumCfg()
# curriculum: CurriculumCfg = CurriculumCfg()

def __post_init__(self):
"""Post initialization."""
Expand All @@ -362,6 +351,7 @@ def __post_init__(self):
self.sim.dt = 0.005
self.sim.disable_contact_processing = True
self.sim.physics_material = self.scene.terrain.physics_material

# update sensor update periods
# we tick all the sensors based on the smallest update period (physics update period)
if self.scene.height_scanner is not None:
Expand Down Expand Up @@ -436,6 +426,19 @@ def update_meshes_for_cloud2(position_array, origin, rot):
return rotated_vectors


def setup_custom_env():
    """Spawn the optional custom USD environment selected via ``--custom_env``.

    Reads ``args_cli.custom_env`` and, for the known choices ``"warehouse"``
    and ``"office"``, loads the matching local USD stage file from ``./envs``
    into the scene at the origin. Any other value leaves the scene unchanged.

    Loading is best-effort: if the USD asset is missing (e.g. the envs folder
    was not downloaded), the error is reported and execution continues.
    """
    # Map each supported CLI choice to its (asset file, scene prim path).
    custom_envs = {
        "warehouse": ("./envs/warehouse.usd", "/World/warehouse"),
        "office": ("./envs/office.usd", "/World/office"),
    }
    selection = custom_envs.get(args_cli.custom_env)
    if selection is None:
        # Unknown choice: nothing to spawn (matches original behavior).
        return
    usd_path, prim_path = selection
    try:
        cfg_scene = sim_utils.UsdFileCfg(usd_path=usd_path)
        cfg_scene.func(prim_path, cfg_scene, translation=(0.0, 0.0, 0.0))
    except Exception as exc:
        # Was a bare `except:` — that also swallowed SystemExit/KeyboardInterrupt
        # and hid the actual cause. Keep the best-effort behavior but report why.
        print(f"Error loading custom environment ({exc!r}). "
              "You should download custom envs folder from: "
              "https://drive.google.com/drive/folders/1vVGuO1KIX1K6mD6mBHDZGm9nk2vaRyj3?usp=sharing")


def main():

# acquire input interface
Expand All @@ -446,6 +449,7 @@ def main():

"""Play with RSL-RL agent."""
# parse configuration

env_cfg = UnitreeGo2CustomEnvCfg()
env_cfg.scene.num_envs = 1

Expand All @@ -456,6 +460,8 @@ def main():
# wrap around environment for rsl-rl
env = RslRlVecEnvWrapper(env)



# specify directory for logging experiments
log_root_path = os.path.join("logs", "rsl_rl", agent_cfg["experiment_name"])
log_root_path = os.path.abspath(log_root_path)
Expand Down Expand Up @@ -494,6 +500,8 @@ def main():

start_time = time.time()

setup_custom_env()

# simulate environment
while simulation_app.is_running():
# run everything in inference mode
Expand Down