Dataset columns (with observed value ranges):
file_path — string, length 20 to 202
content — string, length 9 to 3.85M
size — int64, 9 to 3.85M
lang — string, 9 classes
avg_line_length — float64, 3.33 to 100
max_line_length — int64, 8 to 993
alphanum_fraction — float64, 0.26 to 0.93
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/controllers/qp_controller.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from typing import Union, List import numpy as np import carb # omni-isaac-a1 from omni.isaac.quadruped.utils.a1_classes import A1Measurement, A1Command # QP controller related from omni.isaac.quadruped.utils.a1_ctrl_states import A1CtrlStates from omni.isaac.quadruped.utils.a1_ctrl_params import A1CtrlParams from omni.isaac.quadruped.utils.a1_desired_states import A1DesiredStates from omni.isaac.quadruped.controllers.a1_robot_control import A1RobotControl from omni.isaac.quadruped.utils.a1_sys_model import A1SysModel from omni.isaac.quadruped.utils.go1_sys_model import Go1SysModel from omni.isaac.quadruped.utils.rot_utils import get_xyz_euler_from_quaternion, get_rotation_matrix_from_euler class A1QPController: """[summary] A1 QP controller as a layer. An implementation of the QP controller [1] References: [1] Bledt, Gerardo, et al. "MIT Cheetah 3: Design and control of a robust, dynamic quadruped robot." 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2018. """ def __init__(self, name: str, _simulate_dt: float, waypoint_pose=None) -> None: """Initialize the QP Controller. Args: name {str} -- The name of the layer. _simulate_dt {float} -- rough estimation of the time interval of the control loop """ # rough estimation of the time interval of the control loop self.simulate_dt = _simulate_dt # (nearly) constant control related parameters self._ctrl_params = A1CtrlParams() # control state variables self._ctrl_states = A1CtrlStates() # control goal state variables self._desired_states = A1DesiredStates() # robot controller self._root_control = A1RobotControl() # kinematic calculator if name == "A1": self._sys_model = A1SysModel() else: self._sys_model = Go1SysModel() # variables that toggle standing/moving mode self._init_transition = 0 self._prev_transition = 0 # an auto planner for collecting data self.waypoint_tgt_idx = 1 if waypoint_pose is not None: self.waypoint_pose = waypoint_pose else: self.waypoint_pose = [] """ Operations """ def setup(self) -> None: """[summary] Reset the ctrl states. """ self.ctrl_state_reset() def reset(self) -> np.ndarray: """[summary] Reset the ctrl states. """ self.ctrl_state_reset() def set_target_command(self, base_command: Union[List[float], np.ndarray]) -> None: """[summary] Set target base velocity command from joystick Args: base_command {Union[List[float], np.ndarray]} -- velocity commands for the robot """ self._current_base_command = base_command def advance(self, dt: float, measurement: A1Measurement, path_follow=False, auto_start=True) -> np.array: """[summary] Perform torque command generation. Args: dt {float} -- Timestep update in the world. measurement {A1Measurement} -- Current measurement from robot. path_follow {bool} -- True if a waypoint is passed in, false if not auto_start {bool} -- True to start trotting after 1 second automatically, False to start trotting after "Enter" is pressed Returns: np.ndarray -- The desired joint torques for the robot. 
""" # update controller states from A1Measurement self.update(dt, measurement) if auto_start: if (self._ctrl_states._exp_time > 1) and self._ctrl_states._init_transition == 0: self._ctrl_states._init_transition = 1 # 아주 간단한 P 제어 경로 추적 if path_follow: if self._ctrl_states._exp_time > 6: if self.waypoint_tgt_idx == len(self.waypoint_pose) and self._ctrl_states._init_transition == 1: self._ctrl_states._init_transition = 0 self._ctrl_states._prev_transition = 1 carb.log_info("stop motion") self.waypoint_tgt_idx += 1 elif self.waypoint_tgt_idx < len(self.waypoint_pose) and self._ctrl_states._init_transition == 1: cur_pos = np.array( [self._ctrl_states._root_pos[0], self._ctrl_states._root_pos[1], self._ctrl_states._euler[2]] ) # position에서 x, y, yaw 만 빼낸다. diff_pose = self.waypoint_pose[self.waypoint_tgt_idx] - cur_pos diff_pos = np.array([diff_pose[0], diff_pose[1], 0]) # yaw angle 보정 # fix yaw angle for diff_pos if diff_pose[2] > 1.5 * 3.14: # tgt 3.14, cur -3.14 diff_pose[2] = diff_pose[2] - 6.28 if diff_pose[2] < -1.5 * 3.14: # tgt -3.14, cur 3.14 diff_pose[2] = 6.28 + diff_pose[2] # diff_pos를 body frame으로 변환한 뒤 아주 간단하게 * 10을 해서 경로로 전환한다. # vel command body frame diff_pos_r = self._ctrl_states._rot_mat_z.T @ diff_pos self._current_base_command[0] = 10 * diff_pos_r[0] self._current_base_command[1] = 10 * diff_pos_r[1] # yaw command self._current_base_command[2] = 10 * diff_pose[2] # target pose에 도달하면 다음 target으로 넘어간다. if np.linalg.norm(diff_pose) < 0.1 and self.waypoint_tgt_idx < len(self.waypoint_pose): self.waypoint_tgt_idx += 1 # print(self.waypoint_tgt_idx, " - ", self.waypoint_pose[self.waypoint_tgt_idx]) else: # 모든 target pose에 도달했을 때 # self.waypoint_tgt_idx > len(self.waypoint_pose), in this case the planner is disabled carb.log_info("target reached, back to manual control mode") path_follow = False pass # desired states update # velocity updates # update controller states from target command self._desired_states._root_lin_vel_d[0] = self._current_base_command[0] self._desired_states._root_lin_vel_d[1] = self._current_base_command[1] self._desired_states._root_ang_vel_d[2] = self._current_base_command[2] # euler angle update # _euler_d : desired body orientation in _euler angle self._desired_states._euler_d[2] += self._desired_states._root_ang_vel_d[2] * dt # position locking if self._ctrl_states._init_transition == 1: if np.linalg.norm(self._desired_states._root_lin_vel_d[0]) > 0.05: self._ctrl_params._kp_linear[0] = 0 self._desired_states._root_pos_d[0] = self._ctrl_states._root_pos[0] if np.linalg.norm(self._desired_states._root_lin_vel_d[0]) < 0.05: self._ctrl_params._kp_linear[0] = 5000 if np.linalg.norm(self._desired_states._root_lin_vel_d[1]) > 0.05: self._ctrl_params._kp_linear[1] = 0 self._desired_states._root_pos_d[1] = self._ctrl_states._root_pos[1] if np.linalg.norm(self._desired_states._root_lin_vel_d[1]) < 0.05: self._ctrl_params._kp_linear[1] = 5000 if np.linalg.norm(self._desired_states._root_ang_vel_d[2]) == 0: self._desired_states._euler_d[2] = self._ctrl_states._euler[2] # record position once when moving back into init transition = 0 state if self._ctrl_states._prev_transition == 1 and self._ctrl_states._init_transition < 1: self._ctrl_params._kp_linear[0:2] = np.array([500, 500]) self._desired_states._euler_d[2] = self._ctrl_states._euler[2] self._desired_states._root_pos_d[0:2] = self._ctrl_states._root_pos[0:2] self._desired_states._root_lin_vel_d[0] = 0 self._desired_states._root_lin_vel_d[1] = 0 # make sure this logic only run once 
self._ctrl_states._prev_transition = self._ctrl_states._init_transition self._root_control.update_plan(self._desired_states, self._ctrl_states, self._ctrl_params, dt) # update_plan updates swing foot target # swing foot control and stance foot control torques = self._root_control.generate_ctrl(self._desired_states, self._ctrl_states, self._ctrl_params) return torques def switch_mode(self): """[summary] toggle between stationary/moving mode""" self._ctrl_states._prev_transition = self._ctrl_states._init_transition self._ctrl_states._init_transition = self._current_base_command[3] """ Internal helpers. """ def ctrl_state_reset(self) -> None: """[summary] reset _ctrl_states and _ctrl_params to non-default values """ # following changes to A1CtrlParams alters the robot gait execution performance self._ctrl_params = A1CtrlParams() self._ctrl_params._kp_linear = np.array([500, 500.0, 1600.0]) self._ctrl_params._kd_linear = np.array([2000.0, 2000.0, 4000.0]) self._ctrl_params._kp_angular = np.array([600.0, 600.0, 0.0]) self._ctrl_params._kd_angular = np.array([0.0, 0.0, 500.0]) kp_foot_x = 11250.0 kp_foot_y = 11250.0 kp_foot_z = 11500.0 self._ctrl_params._kp_foot = np.array( [ [kp_foot_x, kp_foot_y, kp_foot_z], [kp_foot_x, kp_foot_y, kp_foot_z], [kp_foot_x, kp_foot_y, kp_foot_z], [kp_foot_x, kp_foot_y, kp_foot_z], ] ) self._ctrl_params._kd_foot = np.array([0.0, 0.0, 0.0]) self._ctrl_params._km_foot = np.diag([0.7, 0.7, 0.7]) self._ctrl_params._robot_mass = 12.5 self._ctrl_params._foot_force_low = 5.0 self._ctrl_states = A1CtrlStates() self._ctrl_states._counter = 0.0 self._ctrl_states._gait_counter = np.array([0.0, 0.0, 0.0, 0.0]) self._ctrl_states._exp_time = 0.0 def update(self, dt: float, measurement: A1Measurement): """[summary] Fill measurement into _ctrl_states Args: dt {float} -- Timestep update in the world. measurement {A1Measurement} -- Current measurement from robot. 
""" self._ctrl_states._root_quat[0] = measurement.state.base_frame.quat[3] # w self._ctrl_states._root_quat[1] = measurement.state.base_frame.quat[0] # x self._ctrl_states._root_quat[2] = measurement.state.base_frame.quat[1] # y self._ctrl_states._root_quat[3] = measurement.state.base_frame.quat[2] # z self._ctrl_states._root_pos = measurement.state.base_frame.pos self._ctrl_states._root_lin_vel = measurement.state.base_frame.lin_vel if self._ctrl_states._root_quat[0] < 0: self._ctrl_states._root_quat = -self._ctrl_states._root_quat self._ctrl_states._euler = get_xyz_euler_from_quaternion(self._ctrl_states._root_quat) self._ctrl_states._rot_mat = get_rotation_matrix_from_euler(self._ctrl_states._euler) # according to rl_controler in isaac.anymal, base_frame.ang_vel is in world frame self._ctrl_states._root_ang_vel = self._ctrl_states._rot_mat.T @ measurement.state.base_frame.ang_vel self._ctrl_states._rot_mat_z = get_rotation_matrix_from_euler(np.array([0.0, 0.0, self._ctrl_states._euler[2]])) # still keep the option of using forward diff velocities for i in range(12): if abs(dt > 1e-10): self._ctrl_states._joint_vel[i] = ( measurement.state.joint_pos[i] - self._ctrl_states._joint_pos[i] ) / dt else: self._ctrl_states._joint_vel[i] = 0.0 self._ctrl_states._joint_pos[i] = measurement.state.joint_pos[i] # self._ctrl_states._joint_vel[i] = measurement.state.joint_vel[i] for i, leg in enumerate(["FL", "FR", "RL", "RR"]): # notice the id order of A1SysModel follows that on A1 hardware # [1, 0, 3, 2] -> [FL, FR, RL, RR] swap_i = self._ctrl_params._swap_foot_indices[i] self._ctrl_states._foot_pos_rel[i, :] = self._sys_model.forward_kinematics( swap_i, self._ctrl_states._joint_pos[i * 3 : (i + 1) * 3] ) self._ctrl_states._j_foot[i * 3 : (i + 1) * 3, i * 3 : (i + 1) * 3] = self._sys_model.jacobian( swap_i, self._ctrl_states._joint_pos[i * 3 : (i + 1) * 3] ) self._ctrl_states._foot_pos_abs[i, :] = self._ctrl_states._rot_mat @ self._ctrl_states._foot_pos_rel[i, :] self._ctrl_states._foot_forces[i] = measurement.foot_forces[i] self._ctrl_states._exp_time += dt
size: 13,538 | lang: Python | avg_line_length: 42.957792 | max_line_length: 135 | alphanum_fraction: 0.583764
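The waypoint-following branch of A1QPController.advance() above is a plain proportional controller on (x, y, yaw): it wraps the yaw error, rotates the planar position error into the yaw-aligned body frame, and scales the result by 10. A minimal NumPy-only re-expression of that step (the standalone function name and values are illustrative, not part of the extension):

import numpy as np

def waypoint_velocity_command(waypoint, root_pos, yaw, gain=10.0):
    """Return (vx, vy, yaw_rate) body-frame commands toward a (x, y, yaw) waypoint."""
    cur = np.array([root_pos[0], root_pos[1], yaw])
    diff = waypoint - cur
    # wrap the yaw error the same way qp_controller.py does
    if diff[2] > 1.5 * 3.14:
        diff[2] -= 6.28
    if diff[2] < -1.5 * 3.14:
        diff[2] += 6.28
    # rotate the planar position error from the world frame into the yaw-aligned body frame
    rot_z = np.array([[np.cos(yaw), -np.sin(yaw), 0.0],
                      [np.sin(yaw),  np.cos(yaw), 0.0],
                      [0.0,          0.0,         1.0]])
    diff_pos_body = rot_z.T @ np.array([diff[0], diff[1], 0.0])
    return gain * diff_pos_body[0], gain * diff_pos_body[1], gain * diff[2]

# example: robot at the origin facing +x, waypoint 0.5 m ahead -> (5.0, 0.0, 0.0)
print(waypoint_velocity_command(np.array([0.5, 0.0, 0.0]), np.zeros(3), 0.0))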
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/tests/test_a1.py
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # # NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import omni.kit.test import omni.kit.commands import carb.tokens import asyncio import numpy as np from omni.isaac.core import World from omni.isaac.quadruped.robots.unitree import Unitree from omni.isaac.core.utils.physics import simulate_async from omni.isaac.quadruped.utils.rot_utils import get_xyz_euler_from_quaternion from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import create_new_stage_async class TestA1(omni.kit.test.AsyncTestCase): async def setUp(self): World.clear_instance() await create_new_stage_async() # This needs to be set so that kit updates match physics updates self._physics_rate = 400 carb.settings.get_settings().set_bool("/app/runLoops/main/rateLimitEnabled", True) carb.settings.get_settings().set_int("/app/runLoops/main/rateLimitFrequency", int(self._physics_rate)) carb.settings.get_settings().set_int("/persistent/simulation/minFrameRate", int(self._physics_rate)) self._physics_dt = 1 / self._physics_rate self._world = World(stage_units_in_meters=1.0, physics_dt=self._physics_dt, rendering_dt=32 * self._physics_dt) await self._world.initialize_simulation_context_async() self._world.scene.add_default_ground_plane( z_position=0, name="default_ground_plane", prim_path="/World/defaultGroundPlane", static_friction=0.2, dynamic_friction=0.2, restitution=0.01, ) self._base_command = [1.0, 0, 0, 0] self._stage = omni.usd.get_context().get_stage() self._timeline = omni.timeline.get_timeline_interface() self._path_follow = False self._auto_start = True await omni.kit.app.get_app().next_update_async() pass async def tearDown(self): await omni.kit.app.get_app().next_update_async() self._timeline.stop() while omni.usd.get_context().get_stage_loading_status()[2] > 0: print("tearDown, assets still loading, waiting to finish...") await asyncio.sleep(1.0) await omni.kit.app.get_app().next_update_async() pass async def test_a1_add(self): self._path_follow = False self._auto_start = True await self.spawn_a1() await omni.kit.app.get_app().next_update_async() self._a1 = self._a1 = self._world.scene.get_object("A1") await omni.kit.app.get_app().next_update_async() self.assertEqual(self._a1.num_dof, 12) self.assertTrue(get_prim_at_path("/World/A1").IsValid(), True) print("robot articulation passed") await omni.kit.app.get_app().next_update_async() # if dc interface is valid, that means the prim is likely imported correctly async def test_robot_move_command(self): self._path_follow = False self._auto_start = True await self.spawn_a1() await omni.kit.app.get_app().next_update_async() self._a1 = self._a1 = self._world.scene.get_object("A1") self.start_pos = np.array(self._a1.get_world_pose()[0]) await simulate_async(seconds=2.0) self.current_pos = np.array(self._a1.get_world_pose()[0]) print(str(self.current_pos)) delta = np.linalg.norm(self.current_pos[0] - self.start_pos[0]) self.assertTrue(delta > 0.5) pass async def 
test_robot_move_forward_waypoint(self): self._path_follow = True self._auto_start = True await self.spawn_a1(waypoints=[np.array([0.0, 0.0, 0.0]), np.array([0.5, 0.0, 0.0])]) await omni.kit.app.get_app().next_update_async() self._a1 = self._world.scene.get_object("A1") await omni.kit.app.get_app().next_update_async() self.start_pos = np.array(self._a1.get_world_pose()[0]) await simulate_async(seconds=1.5) self.current_pos = np.array(self._a1.get_world_pose()[0]) delta = self.current_pos - self.start_pos print(str(delta)) # x should be around 1, y, z should be around 0 self.assertAlmostEquals(0.5, delta[0], 0) self.assertTrue(abs(delta[1]) < 0.1) self.assertTrue(abs(delta[2]) < 0.1) async def test_robot_turn_waypoint(self): self._path_follow = False self._auto_start = True # turn 90 degrees await self.spawn_a1() # waypoints=[np.array([0.0, 0.0, -1.57])]) await omni.kit.app.get_app().next_update_async() self._a1 = self._world.scene.get_object("A1") await omni.kit.app.get_app().next_update_async() self._base_command = [0.0, 0.0, 1.0, 0.0] self.start_quat = np.array(self._a1.get_world_pose()[1][[1, 2, 3, 0]]) await simulate_async(seconds=1.5) self.current_quat = np.array(self._a1.get_world_pose()[1][[1, 2, 3, 0]]) self.start_pos = get_xyz_euler_from_quaternion(self.start_quat) self.current_pos = get_xyz_euler_from_quaternion(self.current_quat) delta = np.array(abs(self.current_pos) - abs(self.start_pos)) print(str(delta)) self.assertTrue(abs(delta[2]) < 0.1) self.assertTrue(abs(delta[1]) < 0.1) self.assertTrue(abs(delta[0]) > 3.14 / 4) # Add this test when the controller has better side movement performance # async def test_robot_shift(self): # await self.spawn_a1() # # move side ways at 1.8 m/s (due to tuning, it is likely slower than that) # self._base_command = [0.0, 1.8, 0, 0] # await omni.kit.app.get_app().next_update_async() # self._a1 = self._world.scene.get_object("A1") # await omni.kit.app.get_app().next_update_async() # self.start_pos = np.array(self.dc.get_rigid_body_pose(self._a1._root_handle).p) # await simulate_async(seconds=10.0) # self.current_pos = np.array(self.dc.get_rigid_body_pose(self._a1._root_handle).p) # delta = self.current_pos - self.start_pos # print("delta: " + str(delta)) # print("start: " + str(self.start_pos)) # print("current: " + str(self.current_pos)) # # y should be around 0.5, x, z should be around 0 # self.assertTrue(abs(delta[1]) > 0.5) # self.assertTrue(abs(delta[0]) < 0.1) # self.assertTrue(abs(delta[2]) < 0.1) async def spawn_a1(self, waypoints=None, model="A1"): self._prim_path = "/World/" + model self._a1 = self._world.scene.get_object("A1") if self._a1 is None: self._a1 = self._world.scene.add( Unitree( prim_path=self._prim_path, name=model, position=np.array([0, 0, 0.40]), physics_dt=self._physics_dt, model=model, way_points=waypoints, ) ) self._a1._qp_controller.ctrl_state_reset() self._world.add_physics_callback("a1_advance", callback_fn=self.on_physics_step) await self._world.reset_async() return def on_physics_step(self, step_size): if self._a1 and self._a1._handle: self._a1.advance( dt=step_size, goal=self._base_command, path_follow=self._path_follow, auto_start=self._auto_start )
size: 7,981 | lang: Python | avg_line_length: 37.191387 | max_line_length: 119 | alphanum_fraction: 0.617466
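Two index permutations recur in this extension: [[1, 2, 3, 0]] turns the (w, x, y, z) quaternion returned by get_world_pose() into the (x, y, z, w) layout stored in FrameState, and [[3, 0, 1, 2]] converts it back (both are used in unitree.py further down as well as in the test above). A tiny standalone check of the two permutations:

import numpy as np

quat_wxyz = np.array([0.9238795, 0.0, 0.0, 0.3826834])  # 45 deg about z, (w, x, y, z)
quat_xyzw = quat_wxyz[[1, 2, 3, 0]]                      # reordered to (x, y, z, w)
assert np.allclose(quat_xyzw[[3, 0, 1, 2]], quat_wxyz)   # inverse permutation restores (w, x, y, z)
print(quat_xyzw)                                         # [0.        0.        0.3826834 0.9238795]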
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/tests/test_go1.py
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # # NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import omni.kit.test import omni.kit.commands import carb.tokens import asyncio import numpy as np from omni.isaac.core import World from omni.isaac.quadruped.robots.unitree import Unitree from omni.isaac.core.utils.stage import create_new_stage_async from omni.isaac.core.utils.prims import get_prim_at_path class TestGo1(omni.kit.test.AsyncTestCase): async def setUp(self): World.clear_instance() await create_new_stage_async() # This needs to be set so that kit updates match physics updates self._physics_rate = 400 carb.settings.get_settings().set_bool("/app/runLoops/main/rateLimitEnabled", True) carb.settings.get_settings().set_int("/app/runLoops/main/rateLimitFrequency", int(self._physics_rate)) carb.settings.get_settings().set_int("/persistent/simulation/minFrameRate", int(self._physics_rate)) self._physics_dt = 1 / self._physics_rate self._world = World(stage_units_in_meters=1.0, physics_dt=self._physics_dt, rendering_dt=32 * self._physics_dt) await self._world.initialize_simulation_context_async() self._world.scene.add_default_ground_plane( z_position=0, name="default_ground_plane", prim_path="/World/defaultGroundPlane", static_friction=0.2, dynamic_friction=0.2, restitution=0.01, ) self._base_command = [0.0, 0, 0, 0] self._stage = omni.usd.get_context().get_stage() self._timeline = omni.timeline.get_timeline_interface() self._path_follow = False self._auto_start = True await omni.kit.app.get_app().next_update_async() pass async def tearDown(self): await omni.kit.app.get_app().next_update_async() self._timeline.stop() while omni.usd.get_context().get_stage_loading_status()[2] > 0: print("tearDown, assets still loading, waiting to finish...") await asyncio.sleep(1.0) await omni.kit.app.get_app().next_update_async() pass async def test_go1_add(self): self._path_follow = False self._auto_start = True await self.spawn_go1(model="Go1") await omni.kit.app.get_app().next_update_async() self._go1 = self._world.scene.get_object("Go1") self.assertEqual(self._go1.num_dof, 12) # actually verify this number self.assertTrue(get_prim_at_path("/World/Go1").IsValid(), True) print("articulation check passed") await omni.kit.app.get_app().next_update_async() # if dc interface is valid, that means the prim is likely imported correctly async def spawn_go1(self, waypoints=None, model="Go1"): self._prim_path = "/World/" + model self._go1 = self._world.scene.get_object("Go1") if self._go1 is None: self._go1 = self._world.scene.add( Unitree( prim_path=self._prim_path, name=model, position=np.array([1, 1, 0.45]), physics_dt=self._physics_dt, model=model, way_points=waypoints, ) ) self._go1._qp_controller.ctrl_state_reset() self._world.add_physics_callback("go1_advance", callback_fn=self.on_physics_step) await self._world.reset_async() return def on_physics_step(self, step_size): if self._go1 and self._go1._handle: # print(self._base_command) self._go1.advance( dt=step_size, 
goal=self._base_command, path_follow=self._path_follow, auto_start=self._auto_start )
size: 4,335 | lang: Python | avg_line_length: 38.063063 | max_line_length: 119 | alphanum_fraction: 0.636448
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/utils/a1_desired_states.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import numpy as np from dataclasses import dataclass, field @dataclass class A1DesiredStates: """ A collection of desired goal states used by the QP agent """ _root_pos_d: np.array = field(default_factory=lambda: np.array([0.0, 0.0, 0.35])) """ control goal parameter: the desired body position in world frame""" _root_lin_vel_d: np.array = field(default_factory=lambda: np.array([0.0, 0.0, 0.0])) """ control goal parameter: the desired body velocity in robot frame """ _euler_d: np.array = field(default_factory=lambda: np.array([0.0, 0.0, 0.0])) """ control goal parameter: the desired body orientation in _euler angle """ _root_ang_vel_d: np.array = field(default_factory=lambda: np.array([0.0, 0.0, 0.0])) """ control goal parameter: the desired body angular velocity """
size: 1,244 | lang: Python | avg_line_length: 41.931033 | max_line_length: 88 | alphanum_fraction: 0.721865
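A1DesiredStates is a plain mutable dataclass; A1QPController.advance() writes the commanded velocities into it and integrates the desired yaw each step. A small sketch of that update, assuming the omni.isaac.quadruped extension is importable (i.e., running inside an Isaac Sim Python environment); the rate values are illustrative:

import numpy as np
from omni.isaac.quadruped.utils.a1_desired_states import A1DesiredStates

desired = A1DesiredStates()
dt, yaw_rate_cmd = 1.0 / 400.0, 0.5         # 400 Hz control loop, 0.5 rad/s turn command
desired._root_ang_vel_d[2] = yaw_rate_cmd
for _ in range(400):                         # integrate the yaw target over one second
    desired._euler_d[2] += desired._root_ang_vel_d[2] * dt
print(desired._euler_d)                      # roughly [0.0, 0.0, 0.5]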
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/utils/a1_classes.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from dataclasses import field, dataclass import numpy as np from omni.isaac.quadruped.utils.types import NamedTuple, FrameState @dataclass class A1State(NamedTuple): """The kinematic state of the articulated robot.""" base_frame: FrameState = field(default_factory=lambda: FrameState("root")) """State of base frame""" joint_pos: np.ndarray = field(default_factory=lambda: np.zeros(12)) """Joint positions with shape: (12,)""" joint_vel: np.ndarray = field(default_factory=lambda: np.zeros(12)) """Joint velocities with shape: (12,)""" @dataclass class A1Measurement(NamedTuple): """The state of the robot along with the mounted sensor data.""" state: A1State = field(default_factory=A1State) """The state of the robot.""" foot_forces: np.ndarray = field(default_factory=lambda: np.zeros(4)) """Feet contact force of the robot in the order: FL, FR, RL, RR.""" base_lin_acc: np.ndarray = field(default_factory=lambda: np.zeros(3)) """Accelerometer reading from IMU attached to robot's base.""" base_ang_vel: np.ndarray = field(default_factory=lambda: np.zeros(3)) """Gyroscope reading from IMU attached to robot's base.""" @dataclass class A1Command(NamedTuple): """The command on the robot actuators.""" desired_joint_torque: np.ndarray = field(default_factory=lambda: np.zeros(12)) """Desired joint torques of the robot with shape: (12,)"""
size: 1,843 | lang: Python | avg_line_length: 34.461538 | max_line_length: 82 | alphanum_fraction: 0.720564
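A1State and A1Measurement are the containers passed between Unitree.update() and the controller. A short construction sketch, again assuming the extension is importable; the numeric values are illustrative only:

import numpy as np
from omni.isaac.quadruped.utils.a1_classes import A1State, A1Measurement

state = A1State()
state.joint_pos = np.array([0.0, 1.2, -1.8] * 4)        # hip/thigh/calf angles per leg (FL, FR, RL, RR)
measurement = A1Measurement(state=state)
measurement.foot_forces[:] = [20.0, 20.0, 25.0, 25.0]    # contact forces in the order FL, FR, RL, RR
print(measurement.as_dict().keys())                      # as_dict() is inherited from NamedTuple in utils.types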
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/utils/actuator_network.py
# python from typing import Union, Tuple import numpy as np from numpy import genfromtxt import torch class LstmSeaNetwork: """Implements an SEA network with LSTM hidden layers.""" def __init__(self): # define the network self._network = None self._hidden_state = torch.zeros((2, 12, 8), requires_grad=False) self._cell_state = torch.zeros((2, 12, 8), requires_grad=False) # default joint position self._default_joint_pos = None """ Properties """ def get_hidden_state(self) -> np.ndarray: if self._hidden_state is None: return np.zeros((12, 8)) else: return self._hidden_state[1].detach().numpy() """ Operations """ def setup(self, path_or_buffer, default_joint_pos: Union[list, np.ndarray]): # load the network from JIT file self._network = torch.jit.load(path_or_buffer) # set the default joint position self._default_joint_pos = np.asarray(default_joint_pos) def reset(self): # reset the hidden state of LSTM with torch.no_grad(): self._hidden_state[:, :, :] = 0.0 self._cell_state[:, :, :] = 0.0 @torch.no_grad() def compute_torques(self, joint_pos, joint_vel, actions) -> Tuple[np.ndarray, np.ndarray]: # create sea network input obs actions = actions.copy() actuator_net_input = torch.zeros((12, 1, 2)) actuator_net_input[:, 0, 0] = torch.from_numpy(actions + self._default_joint_pos - joint_pos) actuator_net_input[:, 0, 1] = torch.from_numpy(np.clip(joint_vel, -20.0, 20)) # call the network torques, (self._hidden_state, self._cell_state) = self._network( actuator_net_input, (self._hidden_state, self._cell_state) ) # return the torque to apply with clipping along with hidden state return torques.detach().clip(-80.0, 80.0).numpy(), self._hidden_state[1].numpy() class SeaNetwork(torch.nn.Module): """Implements a SEA network with MLP hidden layers.""" def __init__(self): super().__init__() # define layer architecture self._sea_network = torch.nn.Sequential( torch.nn.Linear(6, 32), torch.nn.Softsign(), torch.nn.Linear(32, 32), torch.nn.Softsign(), torch.nn.Linear(32, 1), ) # define the delays self._num_delays = 2 self._delays = [8, 3] # define joint histories self._history_size = self._delays[0] self._joint_pos_history = np.zeros((12, self._history_size + 1)) self._joint_vel_history = np.zeros((12, self._history_size + 1)) # define scaling for the actuator network self._sea_vel_scale = 0.4 self._sea_pos_scale = 3.0 self._sea_output_scale = 20.0 self._action_scale = 0.5 # default joint position self._default_joint_pos = None """ Operations """ def setup(self, weights_path: str, default_joint_pos: Union[list, np.ndarray]): # load the weights into network self._load_weights(weights_path) # set the default joint position self._default_joint_pos = np.asarray(default_joint_pos) def reset(self): self._joint_pos_history.fill(0.0) self._joint_vel_history.fill(0.0) def compute_torques(self, joint_pos, joint_vel, actions) -> np.ndarray: self._update_joint_history(joint_pos, joint_vel, actions) return self._compute_sea_torque() """ Internal helpers. 
""" def _load_weights(self, weights_path: str): # load the data data = genfromtxt(weights_path, delimiter=",") # manually defines the number of neurons in MLP expected_num_params = 6 * 32 + 32 + 32 * 32 + 32 + 32 * 1 + 1 assert data.size == expected_num_params # assign neuron weights to each linear layer idx = 0 for layer in self._sea_network: if not isinstance(layer, torch.nn.Softsign): # layer weights weight = np.reshape( data[idx : idx + layer.in_features * layer.out_features], newshape=(layer.in_features, layer.out_features), ).T layer.weight = torch.nn.Parameter(torch.from_numpy(weight.astype(np.float32))) idx += layer.out_features * layer.in_features # layer biases bias = data[idx : idx + layer.out_features] layer.bias = torch.nn.Parameter(torch.from_numpy(bias.astype(np.float32))) idx += layer.out_features # set the module in eval mode self.eval() def _update_joint_history(self, joint_pos, joint_vel, actions): # convert to numpy (sanity) joint_pos = np.asarray(joint_pos) joint_vel = np.asarray(joint_vel) # compute error in position joint_pos_error = self._action_scale * actions + self._default_joint_pos - joint_pos # store into history self._joint_pos_history[:, : self._history_size] = self._joint_pos_history[:, 1:] self._joint_vel_history[:, : self._history_size] = self._joint_vel_history[:, 1:] self._joint_pos_history[:, self._history_size] = joint_pos_error self._joint_vel_history[:, self._history_size] = joint_vel def _compute_sea_torque(self): inp = torch.zeros((12, 6)) for dof in range(12): inp[dof, 0] = self._sea_vel_scale * self._joint_vel_history[dof, self._history_size - self._delays[0]] inp[dof, 1] = self._sea_vel_scale * self._joint_vel_history[dof, self._history_size - self._delays[1]] inp[dof, 2] = self._sea_vel_scale * self._joint_vel_history[dof, self._history_size] inp[dof, 3] = self._sea_pos_scale * self._joint_pos_history[dof, self._history_size - self._delays[0]] inp[dof, 4] = self._sea_pos_scale * self._joint_pos_history[dof, self._history_size - self._delays[1]] inp[dof, 5] = self._sea_pos_scale * self._joint_pos_history[dof, self._history_size] return self._sea_output_scale * self._sea_network(inp) # EOF
size: 6,248 | lang: Python | avg_line_length: 38.301887 | max_line_length: 114 | alphanum_fraction: 0.587388
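LstmSeaNetwork wraps a TorchScript actuator model; the sketch below only shows the intended call sequence. The file name "sea_net.pt" is hypothetical and the default joint angles are illustrative, so this will not run without a real TorchScript export:

import numpy as np
from omni.isaac.quadruped.utils.actuator_network import LstmSeaNetwork

default_joint_pos = np.array([0.0, 0.9, -1.8] * 4)   # nominal pose, illustrative values
sea_net = LstmSeaNetwork()
sea_net.setup("sea_net.pt", default_joint_pos)        # hypothetical TorchScript file path
sea_net.reset()                                       # zero the LSTM hidden/cell state

joint_pos = default_joint_pos.copy()
joint_vel = np.zeros(12)
actions = np.zeros(12)                                # policy output placeholder
torques, hidden = sea_net.compute_torques(joint_pos, joint_vel, actions)
# torques are clipped to +/- 80 inside compute_torques; hidden has shape (12, 8)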
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/utils/rot_utils.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # # python import numba as nb import numpy as np @nb.jit(nopython=True) def get_rotation_matrix_from_quaternion(quat: np.ndarray) -> np.ndarray: """Convert a quaternion to a rotation matrix. Args: quat (np.ndarray): A 4x1 vector in order (w, x, y, z) Returns: np.ndarray: The resulting 3x3 rotation matrix. """ w, x, y, z = quat rot = np.array( [ [2 * (w ** 2 + x ** 2) - 1, 2 * (x * y - w * z), 2 * (x * z + w * y)], [2 * (x * y + w * z), 2 * (w ** 2 + y ** 2) - 1, 2 * (y * z - w * x)], [2 * (x * z - w * y), 2 * (y * z + w * x), 2 * (w ** 2 + z ** 2) - 1], ] ) return rot @nb.jit(nopython=True) def get_xyz_euler_from_quaternion(quat: np.ndarray) -> np.ndarray: """Convert a quaternion to XYZ euler angles. Args: quat (np.ndarray): A 4x1 vector in order (w, x, y, z). Returns: np.ndarray: A 3x1 vector containing (roll, pitch, yaw). """ w, x, y, z = quat y_sqr = y * y t0 = +2.0 * (w * x + y * z) t1 = +1.0 - 2.0 * (x * x + y_sqr) eulerx = np.arctan2(t0, t1) t2 = +2.0 * (w * y - z * x) t2 = +1.0 if t2 > +1.0 else t2 t2 = -1.0 if t2 < -1.0 else t2 eulery = np.arcsin(t2) t3 = +2.0 * (w * z + x * y) t4 = +1.0 - 2.0 * (y_sqr + z * z) eulerz = np.arctan2(t3, t4) result = np.zeros(3) result[0] = eulerx result[1] = eulery result[2] = eulerz return result @nb.jit(nopython=True) def get_quaternion_from_euler(euler: np.ndarray, order: str = "XYZ") -> np.ndarray: """Convert an euler angle to a quaternion based on specified euler angle order. Supported Euler angle orders: {'XYZ', 'YXZ', 'ZXY', 'ZYX', 'YZX', 'XZY'}. Args: euler (np.ndarray): A 3x1 vector with angles in radians. order (str, optional): The specified order of input euler angles. Defaults to "XYZ". Raises: ValueError: If input order is not valid. 
Reference: [1] https://github.com/mrdoob/three.js/blob/master/src/math/Quaternion.js """ # extract input angles r, p, y = euler # compute constants y = y / 2.0 p = p / 2.0 r = r / 2.0 c3 = np.cos(y) s3 = np.sin(y) c2 = np.cos(p) s2 = np.sin(p) c1 = np.cos(r) s1 = np.sin(r) # convert to quaternion based on order if order == "XYZ": result = np.array( [ c1 * c2 * c3 - s1 * s2 * s3, c1 * s2 * s3 + c2 * c3 * s1, c1 * c3 * s2 - s1 * c2 * s3, c1 * c2 * s3 + s1 * c3 * s2, ] ) if result[0] < 0: result = -result return result elif order == "YXZ": result = np.array( [ c1 * c2 * c3 + s1 * s2 * s3, s1 * c2 * c3 + c1 * s2 * s3, c1 * s2 * c3 - s1 * c2 * s3, c1 * c2 * s3 - s1 * s2 * c3, ] ) return result elif order == "ZXY": result = np.array( [ c1 * c2 * c3 - s1 * s2 * s3, s1 * c2 * c3 - c1 * s2 * s3, c1 * s2 * c3 + s1 * c2 * s3, c1 * c2 * s3 + s1 * s2 * c3, ] ) return result elif order == "ZYX": result = np.array( [ c1 * c2 * c3 + s1 * s2 * s3, s1 * c2 * c3 - c1 * s2 * s3, c1 * s2 * c3 + s1 * c2 * s3, c1 * c2 * s3 - s1 * s2 * c3, ] ) return result elif order == "YZX": result = np.array( [ c1 * c2 * c3 - s1 * s2 * s3, s1 * c2 * c3 + c1 * s2 * s3, c1 * s2 * c3 + s1 * c2 * s3, c1 * c2 * s3 - s1 * s2 * c3, ] ) return result elif order == "XZY": result = np.array( [ c1 * c2 * c3 + s1 * s2 * s3, s1 * c2 * c3 - c1 * s2 * s3, c1 * s2 * c3 - s1 * c2 * s3, c1 * c2 * s3 + s1 * s2 * c3, ] ) return result else: raise ValueError("Input euler angle order is meaningless.") @nb.jit(nopython=True) def get_rotation_matrix_from_euler(euler: np.ndarray, order: str = "XYZ") -> np.ndarray: quat = get_quaternion_from_euler(euler, order) return get_rotation_matrix_from_quaternion(quat) @nb.jit(nopython=True) def quat_multiplication(q: np.ndarray, p: np.ndarray) -> np.ndarray: """Compute the product of two quaternions. Args: q (np.ndarray): First quaternion in order (w, x, y, z). p (np.ndarray): Second quaternion in order (w, x, y, z). Returns: np.ndarray: A 4x1 vector representing a quaternion in order (w, x, y, z). """ quat = np.array( [ p[0] * q[0] - p[1] * q[1] - p[2] * q[2] - p[3] * q[3], p[0] * q[1] + p[1] * q[0] - p[2] * q[3] + p[3] * q[2], p[0] * q[2] + p[1] * q[3] + p[2] * q[0] - p[3] * q[1], p[0] * q[3] - p[1] * q[2] + p[2] * q[1] + p[3] * q[0], ] ) return quat @nb.jit(nopython=True) def skew(vector: np.ndarray) -> np.ndarray: """Convert vector to skew symmetric matrix. This function returns a skew-symmetric matrix to perform cross-product as a matrix multiplication operation, i.e.: np.cross(a, b) = np.dot(skew(a), b) Args: vector (np.ndarray): A 3x1 vector. Returns: np.ndarray: The resluting skew-symmetric matrix. """ mat = np.array([[0, -vector[2], vector[1]], [vector[2], 0, -vector[0]], [-vector[1], vector[0], 0]]) return mat
size: 6,140 | lang: Python | avg_line_length: 27.966981 | max_line_length: 104 | alphanum_fraction: 0.484528
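A few quick sanity checks on rot_utils, runnable wherever the extension (and numba) is importable. The round-trip check uses a yaw-only rotation, where the quaternion/Euler conversions are unambiguous:

import numpy as np
from omni.isaac.quadruped.utils.rot_utils import (
    get_quaternion_from_euler,
    get_xyz_euler_from_quaternion,
    get_rotation_matrix_from_quaternion,
    skew,
)

# yaw-only rotation: quaternion <-> euler round trip
euler = np.array([0.0, 0.0, 0.3])
quat = get_quaternion_from_euler(euler)                      # (w, x, y, z)
assert np.allclose(get_xyz_euler_from_quaternion(quat), euler, atol=1e-9)

# rotation matrices built from quaternions are orthonormal
rot = get_rotation_matrix_from_quaternion(quat)
assert np.allclose(rot @ rot.T, np.eye(3), atol=1e-9)

# skew(a) @ b reproduces the cross product, as the docstring states
a, b = np.array([1.0, 2.0, 3.0]), np.array([-1.0, 0.5, 2.0])
assert np.allclose(skew(a) @ b, np.cross(a, b))
print("rot_utils sanity checks passed")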
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/utils/__init__.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.quadruped.utils.a1_classes import A1State, A1Command, A1Measurement from omni.isaac.quadruped.utils.types import NamedTuple, FrameState from omni.isaac.quadruped.utils.a1_ctrl_states import A1CtrlStates from omni.isaac.quadruped.utils.a1_ctrl_params import A1CtrlParams from omni.isaac.quadruped.utils.a1_desired_states import A1DesiredStates from omni.isaac.quadruped.utils.a1_sys_model import A1SysModel from omni.isaac.quadruped.utils.go1_sys_model import Go1SysModel from omni.isaac.quadruped.utils.actuator_network import LstmSeaNetwork from omni.isaac.quadruped.utils import rot_utils
size: 1,036 | lang: Python | avg_line_length: 53.578945 | max_line_length: 83 | alphanum_fraction: 0.833012
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/utils/go1_sys_model.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # """[summary] The kinematics parameters value come from https://github.com/unitreerobotics/unitree_ros/blob/master/robots/a1_description/xacro/const.xacro It calculates the forward kinematics and jacobians of the Unitree A1 robot legs """ import numpy as np from dataclasses import dataclass @dataclass(frozen=True) class Go1SysModel: """Constants and functions related to the forward kinematics of the robot""" """ Properties """ THIGH_OFFSET = 0.08 """constant: the length of the thigh motor""" LEG_OFFSET_X = 0.1881 """constant: x distance from the robot COM to the leg base""" LEG_OFFSET_Y = 0.04675 """constant: y distance from the robot COM to the leg base""" THIGH_LENGTH = 0.213 """constant: length of the leg""" C_FR = 0 """constant: FR leg id in A1's hardware convention""" C_FL = 1 """constant: FL leg id in A1's hardware convention""" C_RR = 2 """constant: RR leg id in A1's hardware convention""" C_RL = 3 """constant: RL leg id in A1's hardware convention""" def __init__(self): """Initializes the class instance. """ pass """ Operations """ def forward_kinematics(self, idx: int, q: np.array) -> np.array: """get the forward_kinematics of the leg Arguments: idx {int}: the index of the leg, must use the A1 hardware convention q {np.array}: the joint angles of a leg """ # these two variables indicates the quadrant of the leg fx = self.LEG_OFFSET_X fy = self.LEG_OFFSET_Y d = self.THIGH_OFFSET if idx == self.C_FR: fy *= -1 d *= -1 elif idx == self.C_FL: pass elif idx == self.C_RR: fx *= -1 fy *= -1 d *= -1 else: fx *= -1 length = self.THIGH_LENGTH q1 = q[0] q2 = q[1] q3 = q[2] p = np.zeros(3) p[0] = fx - length * np.sin(q2 + q3) - length * np.sin(q2) p[1] = ( fy + d * np.cos(q1) + length * np.cos(q2) * np.sin(q1) + length * np.cos(q2) * np.cos(q3) * np.sin(q1) - length * np.sin(q1) * np.sin(q2) * np.sin(q3) ) p[2] = ( d * np.sin(q1) - length * np.cos(q1) * np.cos(q2) - length * np.cos(q1) * np.cos(q2) * np.cos(q3) + length * np.cos(q1) * np.sin(q2) * np.sin(q3) ) return p def jacobian(self, idx: int, q: np.array) -> np.ndarray: """get the jacobian of the leg Arguments: idx {int}: the index of the leg, must use the A1 hardware convention q {np.array}: the joint angles of a leg """ # these two variables indicates the quadrant of the leg fx = self.LEG_OFFSET_X fy = self.LEG_OFFSET_Y d = self.THIGH_OFFSET if idx == self.C_FR: fy *= -1 d *= -1 elif idx == self.C_FL: pass elif idx == self.C_RR: fx *= -1 fy *= -1 d *= -1 else: fx *= -1 length = self.THIGH_LENGTH q1 = q[0] q2 = q[1] q3 = q[2] J = np.zeros([3, 3]) # J[1,1] = 0 J[0, 1] = -length * (np.cos(q2 + q3) + np.cos(q2)) J[0, 2] = -length * np.cos(q2 + q3) J[1, 0] = ( length * np.cos(q1) * np.cos(q2) - d * np.sin(q1) + length * np.cos(q1) * np.cos(q2) * np.cos(q3) - length * np.cos(q1) * np.sin(q2) * np.sin(q3) ) J[1, 1] = -length * np.sin(q1) * (np.sin(q2 + q3) + np.sin(q2)) J[1, 2] = -length * np.sin(q2 + q3) * np.sin(q1) J[2, 0] = ( d * np.cos(q1) + length * np.cos(q2) * np.sin(q1) + length * np.cos(q2) * np.cos(q3) * np.sin(q1) - length * np.sin(q1) * np.sin(q2) * np.sin(q3) ) J[2, 1] = length * np.cos(q1) * (np.sin(q2 + q3) + 
np.sin(q2)) J[2, 2] = length * np.sin(q2 + q3) * np.cos(q1) return J def foot_vel(self, idx: int, q: np.array, dq: np.array) -> np.array: """get the foot velocity Arguments: idx {int}: the index of the leg, must use the A1 hardware convention q {np.array}: the joint angles of a leg dq {np.array}: the joint angular velocities of a leg """ my_jacobian = self.jacobian(idx, q) vel = my_jacobian @ dq return vel
size: 5,027 | lang: Python | avg_line_length: 29.472727 | max_line_length: 98 | alphanum_fraction: 0.519594
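Since Go1SysModel.jacobian() is the hand-written derivative of forward_kinematics(), it can be checked numerically with central finite differences. A standalone sketch assuming the extension is importable; the joint angles are illustrative:

import numpy as np
from omni.isaac.quadruped.utils.go1_sys_model import Go1SysModel

model = Go1SysModel()
idx = model.C_FL                                   # front-left leg, A1 hardware index convention
q = np.array([0.0, 0.9, -1.8])                     # hip, thigh, calf angles (illustrative)

# analytic Jacobian vs. central finite differences of forward_kinematics
J = model.jacobian(idx, q)
J_fd = np.zeros((3, 3))
eps = 1e-6
for j in range(3):
    dq = np.zeros(3)
    dq[j] = eps
    J_fd[:, j] = (model.forward_kinematics(idx, q + dq) - model.forward_kinematics(idx, q - dq)) / (2 * eps)
print(np.max(np.abs(J - J_fd)))                    # should be near machine precision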
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/utils/types.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # # enable deferred annotations from __future__ import annotations # python import dataclasses from dataclasses import dataclass, field from typing import List, Union, Dict, Any import numpy as np @dataclass class NamedTuple(object): """[Summary] The backend data structure for data-passing between various modules. In order to support use cases where the data would have mixed types (such as bool/integer/array), we provide a light data-class to capture this formalism while allowing the data to be shared between different modules easily. The objective is to support complex agent designs and support multi-agent environments. The usage of this class is quite similar to that of a dictionary, since underneath, we rely on the key names to "pass" data from one container into another. However, we do not use the dictionary since a data-class helps in providing type hints which is in practice quite useful. Reference: https://stackoverflow.com/questions/51671699/data-classes-vs-typing-namedtuple-primary-use-cases """ def update(self, data: Union[NamedTuple, List[NamedTuple], Dict[str, Any]]): """Update values from another named tuple. Note: Unlike `dict.update(dict)`, this method does not add element(s) to the instance if the key is not present. Arguments: data {Union[NamedTuple, List[NamedTuple], Dict[str, Any]} -- The input data to update values from. Raises: TypeError -- When input data is not of type :class:`NamedTuple` or :class:`List[NamedTuple]`. """ # convert to dictionary if isinstance(data, dict): data_dict = data elif isinstance(data, list): data_dict = {} for d in data: data_dict.update(d.__dict__) elif isinstance(data, NamedTuple): data_dict = data.__dict__ else: name = self.__class__.__name__ raise TypeError( f"Invalid input data type: {type(data)}. Valid: [`{name}`, `List[{name}]`, `Dict[str, Any]`]." ) # iterate over dictionary and add values to matched keys for key, value in data_dict.items(): try: self.__setattr__(key, value) except AttributeError: pass def as_dict(self) -> dict: """Converts the dataclass to dictionary recursively. Returns: dict: Instance information as a dictionary """ return dataclasses.asdict(self) @dataclass class FrameState(NamedTuple): """The state of a kinematic frame. Attributes: name: The name of the frame. pos: The Cartesian position of the frame. quat: The quaternion orientation (x, y, z, w) of the frame. lin_vel: The linear velocity of the frame. ang_vel: The angular velocity of the frame. """ name: str """Frame name.""" pos: np.ndarray = field(default_factory=lambda: np.zeros(3)) """Catersian position of frame.""" quat: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0, 0.0, 1.0])) """Quaternion orientation of frame: (x, y, z, w)""" lin_vel: np.ndarray = field(default_factory=lambda: np.zeros(3)) """Linear velocity of frame.""" ang_vel: np.ndarray = field(default_factory=lambda: np.zeros(3)) """Angular velocity of frame.""" @property def pose(self) -> np.ndarray: """Returns: A numpy array with position and orientation.""" return np.concatenate([self.pos, self.quat]) # EOF
size: 4,051 | lang: Python | avg_line_length: 33.931034 | max_line_length: 118 | alphanum_fraction: 0.651691
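FrameState inherits the dictionary-style update() and as_dict() helpers from NamedTuple. A short usage sketch, assuming the extension is importable; the values are illustrative:

import numpy as np
from omni.isaac.quadruped.utils.types import FrameState

frame = FrameState("root")
frame.update({"pos": np.array([0.1, 0.0, 0.35]), "lin_vel": np.array([0.5, 0.0, 0.0])})
print(frame.pose)       # position concatenated with the (x, y, z, w) quaternion
print(frame.as_dict())  # recursive dict view via dataclasses.asdict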
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/utils/a1_ctrl_params.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import numpy as np from dataclasses import dataclass, field @dataclass class A1CtrlParams: """ A collection of parameters used by the QP agent """ _robot_mass: float = field(default=16.0) """The mass of the robot""" _swap_foot_indices: np.array = field(default=np.array([1, 0, 3, 2], dtype=int)) """A index list help to convert between A1 hardware leg index order and A1 Isaac Sim leg index order""" _foot_force_low: float = field(default=5.0) """ controller parameter: the low threshold of foot contact force""" _default_foot_pos: np.ndarray = field( default=np.array([[+0.17, +0.15, -0.3], [+0.17, -0.15, -0.3], [-0.17, +0.15, -0.3], [-0.17, -0.15, -0.3]]) ) """ controller parameter: the default foot pos in robot frame when the robot is standing still""" _kp_lin_x: float = field(default=0.0) """ control parameter: the raibert foothold strategy, x position target coefficient""" _kd_lin_x: float = field(default=0.15) """ control parameter: the raibert foothold strategy, x velocity target coefficient""" _kf_lin_x: float = field(default=0.2) """ control parameter: the raibert foothold strategy, x desired velocity target coefficient""" _kp_lin_y: float = field(default=0.0) """ control parameter: the raibert foothold strategy, y position target coefficient""" _kd_lin_y: float = field(default=0.1) """ control parameter: the raibert foothold strategy, y velocity target coefficient""" _kf_lin_y: float = field(default=0.2) """ control parameter: the raibert foothold strategy, y desired velocity target coefficient""" _kp_foot: np.ndarray = field( default=np.array( [[500.0, 500.0, 2000.0], [500.0, 500.0, 2000.0], [500.0, 500.0, 2000.0], [500.0, 500.0, 2000.0]] ) ) """ control parameter: the swing foot position error coefficient""" _kd_foot: np.ndarray = field(default=np.array([0.0, 0.0, 0.0])) """ control parameter: the swing foot velocity error coefficient""" _km_foot: np.ndarray = field(default=np.diag([0.1, 0.1, 0.02])) """ control parameter: the swing foot force amplitude coefficient""" _kp_linear: np.ndarray = field(default=np.array([20.0, 20.0, 2000.0])) """ control parameter: the stance foot force position error coefficient""" _kd_linear: np.ndarray = field(default=np.array([50.0, 50.0, 0.0])) """ control parameter: the stance foot force velocity error coefficient""" _kp_angular: np.ndarray = field(default=np.array([600.0, 600.0, 10.0])) """ control parameter: the stance foot force orientation error coefficient""" _kd_angular: np.ndarray = field(default=np.array([3.0, 3.0, 10.0])) """ control parameter: the stance foot force orientation angular velocity error coefficient""" _torque_gravity: np.ndarray = field(default=np.array([0.80, 0, 0, -0.80, 0, 0, 0.80, 0, 0, -0.80, 0, 0])) """ control parameter: gravity compentation heuristic"""
size: 3,415 | lang: Python | avg_line_length: 43.363636 | max_line_length: 114 | alphanum_fraction: 0.675842
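Note that the array fields above are declared with field(default=np.array(...)), so all A1CtrlParams instances share the same underlying arrays until a field is reassigned (A1QPController.ctrl_state_reset() does reassign the important ones before mutating them in place). The standalone dataclass below, with the hypothetical name ParamsDemo, illustrates the difference from default_factory:

import numpy as np
from dataclasses import dataclass, field

@dataclass
class ParamsDemo:
    shared: np.ndarray = field(default=np.array([20.0, 20.0, 2000.0]))             # one array for all instances
    per_instance: np.ndarray = field(default_factory=lambda: np.array([20.0, 20.0, 2000.0]))

a, b = ParamsDemo(), ParamsDemo()
a.shared[0] = 0.0
a.per_instance[0] = 0.0
print(b.shared[0], b.per_instance[0])   # 0.0 20.0 -> in-place edits to `shared` leak across instances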
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/utils/a1_ctrl_states.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import numpy as np from dataclasses import dataclass, field @dataclass class A1CtrlStates: """ A collection of variables used by the QP agent """ _counter_per_gait: float = field(default=240.0) """The number of ticks of one gait cycle""" _counter_per_swing: float = field(default=120.0) """The number of ticks of one swing phase (half of the gait cycle)""" _counter: float = field(default=0.0) """A _counter used to track how many ticks have elapsed since the simulation started""" _exp_time: float = field(default=0.0) """Simulation time elapsed since the simulation started""" _gait_counter: np.array = field(default_factory=lambda: np.zeros(4)) """Each leg has its own _counter with initial phase""" _gait_counter_speed: np.array = field(default_factory=lambda: np.zeros(4)) """The speed of gait _counter update""" _root_pos: np.array = field(default_factory=lambda: np.zeros(3)) """feedback state: robot position in world frame""" _root_quat: np.array = field(default_factory=lambda: np.zeros(4)) """feedback state: robot quaternion in world frame""" _root_lin_vel: np.array = field(default_factory=lambda: np.zeros(3)) """feedback state: robot linear velocity in world frame""" _root_ang_vel: np.array = field(default_factory=lambda: np.zeros(3)) """feedback state: robot angular velocity in world frame""" # the docstring looks like a typo; this quantity is actually in the robot frame _joint_pos: np.array = field(default_factory=lambda: np.zeros(12)) """feedback state: robot motor joint angles""" _joint_vel: np.array = field(default_factory=lambda: np.zeros(12)) """feedback state: robot motor joint angular velocities""" _foot_forces: np.array = field(default_factory=lambda: np.zeros(4)) """feedback state: robot foot contact forces""" _foot_pos_target_world: np.ndarray = field(default_factory=lambda: np.zeros([4, 3])) """ controller variables: the foot target pos in the world frame""" _foot_pos_target_abs: np.ndarray = field(default_factory=lambda: np.zeros([4, 3])) """ controller variables: the foot target pos in the absolute frame (rotated robot frame)""" _foot_pos_target_rel: np.ndarray = field(default_factory=lambda: np.zeros([4, 3])) """ controller variables: the foot target pos in the relative frame (robot frame)""" _foot_pos_start_rel: np.ndarray = field(default_factory=lambda: np.zeros([4, 3])) """ controller variables: the foot current pos in the relative frame (robot frame)""" _euler: np.array = field(default_factory=lambda: np.zeros(3)) """indirect feedback state: robot _euler angle in world frame""" _rot_mat: np.ndarray = field(default_factory=lambda: np.zeros([3, 3])) """indirect feedback state: robot rotation matrix in world frame""" _rot_mat_z: np.ndarray = field(default_factory=lambda: np.zeros([3, 3])) """indirect feedback state: robot rotation matrix with just the yaw angle in world frame""" # R^{world}_{robot yaw} _foot_pos_abs: np.ndarray = field(default_factory=lambda: np.zeros([4, 3])) """ controller variables: the foot current pos in the absolute frame (rotated robot frame)""" _foot_pos_rel: np.ndarray = field(default_factory=lambda: np.zeros([4, 3])) """ controller variables: the foot current pos in the relative frame (robot frame)""" 
_j_foot: np.ndarray = field(default_factory=lambda: np.zeros([12, 12])) """ controller variables: the foot jacobian in the relative frame (robot frame)""" _gait_type: int = field(default=1) """ control variable: type of gait, currently only 1 is defined, which is a trotting gait""" _gait_type_last: int = field(default=1) """ control variable: saves the previous gait. Reserved for future use""" _contacts: np.array = field(default_factory=lambda: np.array([False] * 4)) """ control variable: determines whether each foot is in contact with the ground""" _early_contacts: np.array = field(default_factory=lambda: np.array([False] * 4)) """ control variable: determines whether each foot makes early contact with the ground (unexpected contact during foot swing)""" _init_transition: int = field(default=0) """ control variable: determine whether the robot should be in walking mode or standstill mode """ _prev_transition: int = field(default=0) """ control variable: previous mode"""
size: 4,796 | lang: Python | avg_line_length: 44.685714 | max_line_length: 122 | alphanum_fraction: 0.69558
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/robots/unitree.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import omni import omni.kit.commands from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.prims import get_prim_at_path, define_prim from omni.isaac.sensor import _sensor from omni.isaac.core.utils.stage import get_current_stage, get_stage_units from omni.isaac.core.articulations import Articulation from omni.isaac.quadruped.utils.a1_classes import A1State, A1Measurement, A1Command from omni.isaac.quadruped.controllers import A1QPController from omni.isaac.sensor import ContactSensor, IMUSensor from typing import Optional, List from collections import deque import numpy as np import carb class Unitree(Articulation): """For unitree based quadrupeds (A1 or Go1)""" def __init__( self, prim_path: str, name: str = "unitree_quadruped", physics_dt: Optional[float] = 1 / 400.0, usd_path: Optional[str] = None, position: Optional[np.ndarray] = None, orientation: Optional[np.ndarray] = None, model: Optional[str] = "A1", way_points: Optional[np.ndarray] = None, ) -> None: """ [Summary] initialize robot, set up sensors and controller Args: prim_path {str} -- prim path of the robot on the stage name {str} -- name of the quadruped physics_dt {float} -- physics downtime of the controller usd_path {str} -- robot usd filepath in the directory position {np.ndarray} -- position of the robot orientation {np.ndarray} -- orientation of the robot model {str} -- robot model (can be either A1 or Go1) way_points {np.ndarray} -- waypoint and heading of the robot """ self._stage = get_current_stage() self._prim_path = prim_path prim = get_prim_at_path(self._prim_path) if not prim.IsValid(): prim = define_prim(self._prim_path, "Xform") if usd_path: prim.GetReferences().AddReference(usd_path) else: assets_root_path = get_assets_root_path() if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets server") if model == "A1": asset_path = assets_root_path + "/Isaac/Robots/Unitree/a1.usd" else: asset_path = assets_root_path + "/Isaac/Robots/Unitree/go1.usd" carb.log_warn("asset path is: " + asset_path) prim.GetReferences().AddReference(asset_path) # state, foot_forces, base_lin_acc, base_ang_vel self._measurement = A1Measurement() # desired_joint_torque self._command = A1Command() # base_frame, joint_pos, joint_vel self._state = A1State() # base_frame, joint_pos, joint_vel self._default_a1_state = A1State() if position is not None: self._default_a1_state.base_frame.pos = np.asarray(position) else: self._default_a1_state.base_frame.pos = np.array([0.0, 0.0, 0.0]) self._default_a1_state.base_frame.quat = np.array([0.0, 0.0, 0.0, 1.0]) self._default_a1_state.base_frame.ang_vel = np.array([0.0, 0.0, 0.0]) self._default_a1_state.base_frame.lin_vel = np.array([0.0, 0.0, 0.0]) self._default_a1_state.joint_pos = np.array([0.0, 1.2, -1.8, 0, 1.2, -1.8, 0.0, 1.2, -1.8, 0, 1.2, -1.8]) self._default_a1_state.joint_vel = np.zeros(12) self._goal = np.zeros(3) self.meters_per_unit = get_stage_units() super().__init__(prim_path=self._prim_path, name=name, position=position, orientation=orientation) # contact sensor setup # "FL", "FR", "RL", "RR" self.feet_order = 
["FL", "FR", "RL", "RR"] self.feet_path = [ self._prim_path + "/FL_foot", self._prim_path + "/FR_foot", self._prim_path + "/RL_foot", self._prim_path + "/RR_foot", ] self.color = [(1, 0, 0, 1), (0, 1, 0, 1), (0, 0, 1, 1), (1, 1, 0, 1)] self._contact_sensors = [None] * 4 for i in range(4): self._contact_sensors[i] = ContactSensor( prim_path=self.feet_path[i] + "/sensor", min_threshold=0, max_threshold=1000000, radius=0.03, dt=physics_dt, ) self.foot_force = np.zeros(4) self.enable_foot_filter = True self._FILTER_WINDOW_SIZE = 20 self._foot_filters = [deque(), deque(), deque(), deque()] # imu sensor setup self.imu_path = self._prim_path + "/imu_link" self._imu_sensor = IMUSensor( prim_path=self.imu_path + "/imu_sensor", name="imu", dt=physics_dt, translation=np.array([0, 0, 0]), orientation=np.array([1, 0, 0, 0]), ) self.base_lin = np.zeros(3) self.ang_vel = np.zeros(3) # Controller self.physics_dt = physics_dt if way_points: self._qp_controller = A1QPController(model, self.physics_dt, way_points) else: self._qp_controller = A1QPController(model, self.physics_dt) self._qp_controller.setup() self._dof_control_modes: List[int] = list() return def set_state(self, state: A1State) -> None: """[Summary] Set the kinematic state of the robot. Args: state {A1State} -- The state of the robot to set. Raises: RuntimeError: When the DC Toolbox interface has not been configured. """ self.set_world_pose(position=state.base_frame.pos, orientation=state.base_frame.quat[[3, 0, 1, 2]]) self.set_linear_velocity(state.base_frame.lin_vel) self.set_angular_velocity(state.base_frame.ang_vel) # joint_state from the DC interface now has the order of # 'FL_hip_joint', 'FR_hip_joint', 'RL_hip_joint', 'RR_hip_joint', # 'FL_thigh_joint', 'FR_thigh_joint', 'RL_thigh_joint', 'RR_thigh_joint', # 'FL_calf_joint', 'FR_calf_joint', 'RL_calf_joint', 'RR_calf_joint' # while the QP controller uses the order of # FL_hip_joint FL_thigh_joint FL_calf_joint # FR_hip_joint FR_thigh_joint FR_calf_joint # RL_hip_joint RL_thigh_joint RL_calf_joint # RR_hip_joint RR_thigh_joint RR_calf_joint # we convert controller order to DC order for setting state self.set_joint_positions( positions=np.asarray(np.array(state.joint_pos.reshape([4, 3]).T.flat), dtype=np.float32) ) self.set_joint_velocities( velocities=np.asarray(np.array(state.joint_vel.reshape([4, 3]).T.flat), dtype=np.float32) ) self.set_joint_efforts(np.zeros_like(state.joint_pos)) return def update_contact_sensor_data(self) -> None: """[summary] Updates processed contact sensor data from the robot feets, store them in member variable foot_force """ # Order: FL, FR, BL, BR for i in range(len(self.feet_path)): frame = self._contact_sensors[i].get_current_frame() if "force" in frame: if self.enable_foot_filter: self._foot_filters[i].append(frame["force"]) if len(self._foot_filters[i]) > self._FILTER_WINDOW_SIZE: self._foot_filters[i].popleft() self.foot_force[i] = np.mean(self._foot_filters[i]) else: self.foot_force[i] = frame["force"] def update_imu_sensor_data(self) -> None: """[summary] Updates processed imu sensor data from the robot body, store them in member variable base_lin and ang_vel """ frame = self._imu_sensor.get_current_frame() self.base_lin = frame["lin_acc"] self.ang_vel = frame["ang_vel"] return def update(self) -> None: """[summary] update robot sensor variables, state variables in A1Measurement """ self.update_contact_sensor_data() self.update_imu_sensor_data() # joint pos and vel from the DC interface self.joint_state = super().get_joints_state() # joint_state from 
the DC interface now has the order of # 'FL_hip_joint', 'FR_hip_joint', 'RL_hip_joint', 'RR_hip_joint', # 'FL_thigh_joint', 'FR_thigh_joint', 'RL_thigh_joint', 'RR_thigh_joint', # 'FL_calf_joint', 'FR_calf_joint', 'RL_calf_joint', 'RR_calf_joint' # while the QP controller uses the order of # FL_hip_joint FL_thigh_joint FL_calf_joint # FR_hip_joint FR_thigh_joint FR_calf_joint # RL_hip_joint RL_thigh_joint RL_calf_joint # RR_hip_joint RR_thigh_joint RR_calf_joint # we convert DC order to controller order for joint info self._state.joint_pos = np.array(self.joint_state.positions.reshape([3, 4]).T.flat) self._state.joint_vel = np.array(self.joint_state.velocities.reshape([3, 4]).T.flat) # base frame base_pose = self.get_world_pose() self._state.base_frame.pos = base_pose[0] self._state.base_frame.quat = base_pose[1][[1, 2, 3, 0]] self._state.base_frame.lin_vel = self.get_linear_velocity() self._state.base_frame.ang_vel = self.get_angular_velocity() # assign to _measurement obj self._measurement.state = self._state self._measurement.foot_forces = np.asarray(self.foot_force) self._measurement.base_ang_vel = np.asarray(self.ang_vel) self._measurement.base_lin_acc = np.asarray(self.base_lin) return def advance(self, dt, goal, path_follow=False, auto_start=True) -> np.ndarray: """[summary] compute desired torque and set articulation effort to robot joints Argument: dt {float} -- Timestep update in the world. goal {List[int]} -- x velocity, y velocity, angular velocity, state switch path_follow {bool} -- true for following coordinates, false for keyboard control auto_start {bool} -- true for start trotting after 1 sec, false for start trotting after switch mode function is called Returns: np.ndarray -- The desired joint torques for the robot. """ if goal is None: goal = self._goal else: self._goal = goal self.update() self._qp_controller.set_target_command(goal) self._command.desired_joint_torque = self._qp_controller.advance(dt, self._measurement, path_follow, auto_start) # joint_state from the DC interface now has the order of # 'FL_hip_joint', 'FR_hip_joint', 'RL_hip_joint', 'RR_hip_joint', # 'FL_thigh_joint', 'FR_thigh_joint', 'RL_thigh_joint', 'RR_thigh_joint', # 'FL_calf_joint', 'FR_calf_joint', 'RL_calf_joint', 'RR_calf_joint' # while the QP controller uses the order of # FL_hip_joint FL_thigh_joint FL_calf_joint # FR_hip_joint FR_thigh_joint FR_calf_joint # RL_hip_joint RL_thigh_joint RL_calf_joint # RR_hip_joint RR_thigh_joint RR_calf_joint # we convert controller order to DC order for command torque torque_reorder = np.array(self._command.desired_joint_torque.reshape([4, 3]).T.flat) self.set_joint_efforts(np.asarray(torque_reorder, dtype=np.float32)) return self._command def initialize(self, physics_sim_view=None) -> None: """[summary] initialize dc interface, set up drive mode and initial robot state """ super().initialize(physics_sim_view=physics_sim_view) self.get_articulation_controller().set_effort_modes("force") self.get_articulation_controller().switch_control_mode("effort") self.set_state(self._default_a1_state) for i in range(4): self._contact_sensors[i].initialize() return def post_reset(self) -> None: """[summary] post reset articulation and qp_controller """ super().post_reset() for i in range(4): self._contact_sensors[i].post_reset() self._qp_controller.reset() self.set_state(self._default_a1_state) return
12,884
Python
39.518868
127
0.593294
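The Unitree class above converts joint vectors back and forth between the articulation (DC) ordering, which groups all hip joints, then all thigh joints, then all calf joints, and the QP controller ordering, which groups the three joints of each leg together. The reshape-and-transpose trick it uses is easy to get backwards, so here is a minimal standalone NumPy sketch (labels invented for illustration, not part of the extension) showing that the two conversions are inverses:

import numpy as np

# 12 joint values labelled in the QP controller order:
# FL_hip, FL_thigh, FL_calf, FR_hip, ..., RR_calf (grouped per leg)
controller_order = np.arange(12)

# controller order -> DC/articulation order (grouped per joint type:
# all hips, then all thighs, then all calves), as done before set_joint_positions
dc_order = np.array(controller_order.reshape([4, 3]).T.flat)
print(dc_order)  # [ 0  3  6  9  1  4  7 10  2  5  8 11]

# DC order -> controller order, as done when reading joint states in update()
back = np.array(dc_order.reshape([3, 4]).T.flat)
assert np.array_equal(back, controller_order)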
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/robots/unitree_vision.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # # python from typing import Optional import numpy as np # omniverse from pxr import UsdGeom, Gf import omni.kit.commands import omni.usd import omni.graph.core as og from omni.isaac.quadruped.robots import Unitree from omni.isaac.core.utils.viewports import set_camera_view from omni.kit.viewport.utility import get_active_viewport, get_viewport_from_window_name from omni.isaac.core.utils.prims import set_targets class UnitreeVision(Unitree): """[Summary] For unitree based quadrupeds (A1 or Go1) with camera """ def __init__( self, prim_path: str, name: str = "unitree_quadruped", physics_dt: Optional[float] = 1 / 400.0, usd_path: Optional[str] = None, position: Optional[np.ndarray] = None, orientation: Optional[np.ndarray] = None, model: Optional[str] = "A1", is_ros2: Optional[bool] = False, way_points: Optional[np.ndarray] = None, ) -> None: """ [Summary] initialize robot, set up sensors and controller Arguments: prim_path {str} -- prim path of the robot on the stage name {str} -- name of the quadruped physics_dt {float} -- physics downtime of the controller usd_path {str} -- robot usd filepath in the directory position {np.ndarray} -- position of the robot orientation {np.ndarray} -- orientation of the robot model {str} -- robot model (can be either A1 or Go1) way_points {np.ndarray} -- waypoints for the robot """ super().__init__(prim_path, name, physics_dt, usd_path, position, orientation, model, way_points) self.image_width = 640 self.image_height = 480 self.cameras = [ # 0name, 1offset, 2orientation, 3hori aperture, 4vert aperture, 5projection, 6focal length, 7focus distance ("/camera_left", Gf.Vec3d(0.2693, 0.025, 0.067), (90, 0, -90), 21, 16, "perspective", 24, 400), ("/camera_right", Gf.Vec3d(0.2693, -0.025, 0.067), (90, 0, -90), 21, 16, "perspective", 24, 400), ] self.camera_graphs = [] # after stage is defined self._stage = omni.usd.get_context().get_stage() # add cameras on the imu link for i in range(len(self.cameras)): # add camera prim camera = self.cameras[i] camera_path = self._prim_path + "/imu_link" + camera[0] camera_prim = UsdGeom.Camera(self._stage.DefinePrim(camera_path, "Camera")) xform_api = UsdGeom.XformCommonAPI(camera_prim) xform_api.SetRotate(camera[2], UsdGeom.XformCommonAPI.RotationOrderXYZ) xform_api.SetTranslate(camera[1]) camera_prim.GetHorizontalApertureAttr().Set(camera[3]) camera_prim.GetVerticalApertureAttr().Set(camera[4]) camera_prim.GetProjectionAttr().Set(camera[5]) camera_prim.GetFocalLengthAttr().Set(camera[6]) camera_prim.GetFocusDistanceAttr().Set(camera[7]) self.is_ros2 = is_ros2 ros_version = "ROS1" ros_bridge_version = "ros_bridge." self.ros_vp_offset = 1 if self.is_ros2: ros_version = "ROS2" ros_bridge_version = "ros2_bridge." 
# Creating an on-demand push graph with cameraHelper nodes to generate ROS image publishers keys = og.Controller.Keys graph_path = "/ROS_" + camera[0].split("/")[-1] (camera_graph, _, _, _) = og.Controller.edit( { "graph_path": graph_path, "evaluator_name": "execution", "pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_SIMULATION, }, { keys.CREATE_NODES: [ ("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("createViewport", "omni.isaac.core_nodes.IsaacCreateViewport"), ("setViewportResolution", "omni.isaac.core_nodes.IsaacSetViewportResolution"), ("getRenderProduct", "omni.isaac.core_nodes.IsaacGetViewportRenderProduct"), ("setCamera", "omni.isaac.core_nodes.IsaacSetCameraOnRenderProduct"), ("cameraHelperRgb", "omni.isaac." + ros_bridge_version + ros_version + "CameraHelper"), ("cameraHelperInfo", "omni.isaac." + ros_bridge_version + ros_version + "CameraHelper"), ], keys.CONNECT: [ ("OnPlaybackTick.outputs:tick", "createViewport.inputs:execIn"), ("createViewport.outputs:execOut", "setViewportResolution.inputs:execIn"), ("createViewport.outputs:viewport", "setViewportResolution.inputs:viewport"), ("createViewport.outputs:execOut", "getRenderProduct.inputs:execIn"), ("createViewport.outputs:viewport", "getRenderProduct.inputs:viewport"), ("getRenderProduct.outputs:execOut", "setCamera.inputs:execIn"), ("getRenderProduct.outputs:renderProductPath", "setCamera.inputs:renderProductPath"), ("setCamera.outputs:execOut", "cameraHelperRgb.inputs:execIn"), ("setCamera.outputs:execOut", "cameraHelperInfo.inputs:execIn"), ("getRenderProduct.outputs:renderProductPath", "cameraHelperRgb.inputs:renderProductPath"), ("getRenderProduct.outputs:renderProductPath", "cameraHelperInfo.inputs:renderProductPath"), ], keys.SET_VALUES: [ ("createViewport.inputs:name", "Viewport " + str(i + self.ros_vp_offset)), ("setViewportResolution.inputs:height", int(self.image_height)), ("setViewportResolution.inputs:width", int(self.image_width)), ("cameraHelperRgb.inputs:frameId", camera[0]), ("cameraHelperRgb.inputs:nodeNamespace", "/isaac_a1"), ("cameraHelperRgb.inputs:topicName", "camera_forward" + camera[0] + "/rgb"), ("cameraHelperRgb.inputs:type", "rgb"), ("cameraHelperInfo.inputs:frameId", camera[0]), ("cameraHelperInfo.inputs:nodeNamespace", "/isaac_a1"), ("cameraHelperInfo.inputs:topicName", camera[0] + "/camera_info"), ("cameraHelperInfo.inputs:type", "camera_info"), ], }, ) set_targets( prim=self._stage.GetPrimAtPath(graph_path + "/setCamera"), attribute="inputs:cameraPrim", target_prim_paths=[camera_path], ) self.camera_graphs.append(camera_graph) self.viewports = [] for viewport_name in ["Viewport", "Viewport 1", "Viewport 2"]: viewport_api = get_viewport_from_window_name(viewport_name) self.viewports.append(viewport_api) self.set_camera_execution_step = True def dockViewports(self) -> None: """ [Summary] For instantiating and docking view ports """ # first, set main viewport main_viewport = get_active_viewport() set_camera_view(eye=[3.0, 3.0, 3.0], target=[0, 0, 0], camera_prim_path="/OmniverseKit_Persp") main_viewport = omni.ui.Workspace.get_window("Viewport") left_camera_viewport = omni.ui.Workspace.get_window("Viewport 1") right_camera_viewport = omni.ui.Workspace.get_window("Viewport 2") if main_viewport is not None and left_camera_viewport is not None and right_camera_viewport is not None: left_camera_viewport.dock_in(main_viewport, omni.ui.DockPosition.RIGHT, 2 / 3.0) right_camera_viewport.dock_in(left_camera_viewport, omni.ui.DockPosition.RIGHT, 0.5) def setCameraExeutionStep(self, 
step: np.uint) -> None: """ [Summary] Sets the execution step in the omni.isaac.core_nodes.IsaacSimulationGate node located in the camera sensor pipeline """ for viewport in self.viewports[self.ros_vp_offset :]: if viewport is not None: import omni.syntheticdata._syntheticdata as sd rv = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(sd.SensorType.Rgb.name) rgb_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path( rv + "IsaacSimulationGate", viewport.get_render_product_path() ) camera_info_gate_path = omni.syntheticdata.SyntheticData._get_node_path( "PostProcessDispatch" + "IsaacSimulationGate", viewport.get_render_product_path() ) og.Controller.attribute(rgb_camera_gate_path + ".inputs:step").set(step) og.Controller.attribute(camera_info_gate_path + ".inputs:step").set(step) def update(self) -> None: """ [Summary] Update robot variables from the environment """ super().update() if self.set_camera_execution_step: self.setCameraExeutionStep(1) self.dockViewports() self.set_camera_execution_step = False def advance(self, dt, goal, path_follow=False) -> np.ndarray: """[summary] calls the unitree advance to compute torque Argument: dt {float} -- Timestep update in the world. goal {List[int]} -- x velocity, y velocity, angular velocity, state switch path_follow {bool} -- True for following a set of coordinates, False for keyboard control Returns: np.ndarray -- The desired joint torques for the robot. """ super().advance(dt, goal, path_follow)
10,522
Python
44.752174
123
0.589052
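The UnitreeVision class packs each camera's configuration into a positional tuple and documents the slots only in a comment. As a readability aid, a hypothetical NamedTuple with the same fields makes the indices self-describing; this is a sketch of a possible refactor, not part of the shipped class:

from typing import NamedTuple, Tuple

class CameraSpec(NamedTuple):
    # Mirrors the positional tuple stored in UnitreeVision.cameras
    name: str                                 # prim name appended under .../imu_link
    offset: Tuple[float, float, float]        # translation on the imu link
    orientation: Tuple[float, float, float]   # XYZ Euler rotation in degrees
    horizontal_aperture: float
    vertical_aperture: float
    projection: str
    focal_length: float
    focus_distance: float

left = CameraSpec("/camera_left", (0.2693, 0.025, 0.067), (90, 0, -90), 21, 16, "perspective", 24, 400)
print(left.name, left.focal_length)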
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/robots/__init__.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.quadruped.robots.unitree import Unitree from omni.isaac.quadruped.robots.unitree_vision import UnitreeVision from omni.isaac.quadruped.robots.unitree_direct import UnitreeDirect from omni.isaac.quadruped.robots.anymal import Anymal
677
Python
47.428568
76
0.827179
kimsooyoung/legged_robotics/omni.isaac.quadruped/omni/isaac/quadruped/robots/anymal.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import omni import omni.kit.commands from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.prims import get_prim_at_path, define_prim from omni.isaac.core.utils.rotations import quat_to_rot_matrix, quat_to_euler_angles, euler_to_rot_matrix from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.articulations import Articulation from omni.isaac.quadruped.utils import LstmSeaNetwork import io from pxr import Gf from typing import Optional, List import numpy as np import torch import carb class Anymal(Articulation): """The ANYmal quadruped""" def __init__( self, prim_path: str, name: str = "anymal", usd_path: Optional[str] = None, position: Optional[np.ndarray] = None, orientation: Optional[np.ndarray] = None, ) -> None: """ [Summary] initialize robot, set up sensors and controller Args: prim_path {str} -- prim path of the robot on the stage name {str} -- name of the quadruped usd_path {str} -- robot usd filepath in the directory position {np.ndarray} -- position of the robot orientation {np.ndarray} -- orientation of the robot """ self._stage = get_current_stage() self._prim_path = prim_path prim = get_prim_at_path(self._prim_path) assets_root_path = get_assets_root_path() if not prim.IsValid(): prim = define_prim(self._prim_path, "Xform") if usd_path: prim.GetReferences().AddReference(usd_path) else: if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") asset_path = assets_root_path + "/Isaac/Robots/ANYbotics/anymal_c.usd" carb.log_warn("asset path is: " + asset_path) prim.GetReferences().AddReference(asset_path) super().__init__(prim_path=self._prim_path, name=name, position=position, orientation=orientation) self._dof_control_modes: List[int] = list() # Policy file_content = omni.client.read_file(assets_root_path + "/Isaac/Samples/Quadruped/Anymal_Policies/policy_1.pt")[ 2 ] file = io.BytesIO(memoryview(file_content).tobytes()) self._policy = torch.jit.load(file) self._base_vel_lin_scale = 2.0 self._base_vel_ang_scale = 0.25 self._joint_pos_scale = 1.0 self._joint_vel_scale = 0.05 self._action_scale = 0.5 self._default_joint_pos = np.array([0.0, 0.4, -0.8, 0.0, -0.4, 0.8, -0.0, 0.4, -0.8, -0.0, -0.4, 0.8]) self._previous_action = np.zeros(12) self._policy_counter = 0 # Actuator network file_content = omni.client.read_file( assets_root_path + "/Isaac/Samples/Quadruped/Anymal_Policies/sea_net_jit2.pt" )[2] file = io.BytesIO(memoryview(file_content).tobytes()) self._actuator_network = LstmSeaNetwork() self._actuator_network.setup(file, self._default_joint_pos) self._actuator_network.reset() # Height scaner y = np.arange(-0.5, 0.6, 0.1) x = np.arange(-0.8, 0.9, 0.1) grid_x, grid_y = np.meshgrid(x, y) self._scan_points = np.zeros((grid_x.size, 3)) self._scan_points[:, 0] = grid_x.transpose().flatten() self._scan_points[:, 1] = grid_y.transpose().flatten() self.physx_query_interface = omni.physx.get_physx_scene_query_interface() self._query_info = [] def _hit_report_callback(self, hit): current_hit_body = hit.rigid_body if "/World/GroundPlane" in current_hit_body: 
self._query_info.append(hit.distance) return True def _compute_observation(self, command): """[summary] compute the observation vector for the policy Argument: command {np.ndarray} -- the robot command (v_x, v_y, w_z) Returns: np.ndarray -- The observation vector. """ lin_vel_I = self.get_linear_velocity() ang_vel_I = self.get_angular_velocity() pos_IB, q_IB = self.get_world_pose() R_IB = quat_to_rot_matrix(q_IB) R_BI = R_IB.transpose() lin_vel_b = np.matmul(R_BI, lin_vel_I) ang_vel_b = np.matmul(R_BI, ang_vel_I) gravity_b = np.matmul(R_BI, np.array([0.0, 0.0, -1.0])) obs = np.zeros(235) # Base lin vel obs[:3] = self._base_vel_lin_scale * lin_vel_b # Base ang vel obs[3:6] = self._base_vel_ang_scale * ang_vel_b # Gravity obs[6:9] = gravity_b # Command obs[9] = self._base_vel_lin_scale * command[0] obs[10] = self._base_vel_lin_scale * command[1] obs[11] = self._base_vel_ang_scale * command[2] # Joint states # joint_state from the DC interface now has the order of # 'FL_hip_joint', 'FR_hip_joint', 'RL_hip_joint', 'RR_hip_joint', # 'FL_thigh_joint', 'FR_thigh_joint', 'RL_thigh_joint', 'RR_thigh_joint', # 'FL_calf_joint', 'FR_calf_joint', 'RL_calf_joint', 'RR_calf_joint' # while the learning controller uses the order of # FL_hip_joint FL_thigh_joint FL_calf_joint # FR_hip_joint FR_thigh_joint FR_calf_joint # RL_hip_joint RL_thigh_joint RL_calf_joint # RR_hip_joint RR_thigh_joint RR_calf_joint # Convert DC order to controller order for joint info current_joint_pos = self.get_joint_positions() current_joint_vel = self.get_joint_velocities() current_joint_pos = np.array(current_joint_pos.reshape([3, 4]).T.flat) current_joint_vel = np.array(current_joint_vel.reshape([3, 4]).T.flat) obs[12:24] = self._joint_pos_scale * (current_joint_pos - self._default_joint_pos) obs[24:36] = self._joint_vel_scale * current_joint_vel obs[36:48] = self._previous_action # height_scanner rpy = -quat_to_euler_angles(q_IB) rpy[:2] = 0.0 yaw_rot = np.array(Gf.Matrix3f(euler_to_rot_matrix(rpy))) world_scan_points = np.matmul(yaw_rot, self._scan_points.T).T + pos_IB for i in range(world_scan_points.shape[0]): self._query_info.clear() self.physx_query_interface.raycast_all( tuple(world_scan_points[i]), (0.0, 0.0, -1.0), 100, self._hit_report_callback ) if self._query_info: distance = min(self._query_info) obs[48 + i] = np.clip(distance - 0.5, -1.0, 1.0) else: print("No hit") return obs def advance(self, dt, command): """[summary] compute the desired torques and apply them to the articulation Argument: dt {float} -- Timestep update in the world. 
command {np.ndarray} -- the robot command (v_x, v_y, w_z) """ if self._policy_counter % 4 == 0: obs = self._compute_observation(command) with torch.no_grad(): obs = torch.from_numpy(obs).view(1, -1).float() self.action = self._policy(obs).detach().view(-1).numpy() self._previous_action = self.action.copy() self._dc_interface.wake_up_articulation(self._handle) # joint_state from the DC interface now has the order of # 'FL_hip_joint', 'FR_hip_joint', 'RL_hip_joint', 'RR_hip_joint', # 'FL_thigh_joint', 'FR_thigh_joint', 'RL_thigh_joint', 'RR_thigh_joint', # 'FL_calf_joint', 'FR_calf_joint', 'RL_calf_joint', 'RR_calf_joint' # while the learning controller uses the order of # FL_hip_joint FL_thigh_joint FL_calf_joint # FR_hip_joint FR_thigh_joint FR_calf_joint # RL_hip_joint RL_thigh_joint RL_calf_joint # RR_hip_joint RR_thigh_joint RR_calf_joint # Convert DC order to controller order for joint info current_joint_pos = self.get_joint_positions() current_joint_vel = self.get_joint_velocities() current_joint_pos = np.array(current_joint_pos.reshape([3, 4]).T.flat) current_joint_vel = np.array(current_joint_vel.reshape([3, 4]).T.flat) joint_torques, _ = self._actuator_network.compute_torques( current_joint_pos, current_joint_vel, self._action_scale * self.action ) # finally convert controller order to DC order for command torque torque_reorder = np.array(joint_torques.reshape([4, 3]).T.flat) self._dc_interface.set_articulation_dof_efforts(self._handle, torque_reorder) self._policy_counter += 1 def initialize(self, physics_sim_view=None) -> None: """[summary] initialize the dc interface, set up drive mode """ super().initialize(physics_sim_view=physics_sim_view) self.get_articulation_controller().set_effort_modes("force") self.get_articulation_controller().switch_control_mode("effort") def post_reset(self) -> None: """[summary] post reset articulation """ super().post_reset()
9,695
Python
38.255061
120
0.597834
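The ANYmal policy observation built in _compute_observation is a flat 235-element vector; the slice boundaries are implicit in the assignments above. The sketch below (plain NumPy, mirroring the arange grid from __init__) spells out the layout and why the height scan accounts for the remaining 187 entries:

import numpy as np

# Same grid the class builds in __init__ for the height scanner
y = np.arange(-0.5, 0.6, 0.1)             # 11 lateral samples
x = np.arange(-0.8, 0.9, 0.1)             # 17 longitudinal samples
grid_x, grid_y = np.meshgrid(x, y)
num_scan_points = grid_x.size             # 187 in the shipped example

# Index layout of the observation vector filled by _compute_observation
layout = {
    "base_lin_vel": (0, 3),
    "base_ang_vel": (3, 6),
    "projected_gravity": (6, 9),
    "command": (9, 12),
    "joint_pos_offsets": (12, 24),
    "joint_vel": (24, 36),
    "previous_action": (36, 48),
    "height_scan": (48, 48 + num_scan_points),
}
print(layout["height_scan"])              # (48, 235), matching obs = np.zeros(235)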
kimsooyoung/legged_robotics/omni.isaac.quadruped/docs/CHANGELOG.md
# Changelog

## [1.3.0] - 2023-02-01

### Removed

- Removed Quadruped class
- Removed dynamic control extension dependency
- Used omni.isaac.sensor classes for Contact and IMU sensors

## [1.2.2] - 2022-12-10

### Fixed

- Updated camera pipeline with writers

## [1.2.1] - 2022-11-03

### Fixed

- Incorrect viewport name issue
- Viewports not docking correctly

## [1.2.0] - 2022-08-30

### Changed

- Remove direct legacy viewport calls

## [1.1.2] - 2022-05-19

### Changed

- Updated unitree vision class to use OG ROS nodes
- Updated ROS1/ROS2 quadruped standalone samples to use OG ROS nodes

## [1.1.1] - 2022-05-15

### Fixed

- DC joint order change related fixes.

## [1.1.0] - 2022-05-05

### Added

- added the ANYmal robot

## [1.0.2] - 2022-04-21

### Changed

- decoupled sensor testing from A1 and Go1 unit test
- fixed contact sensor bug in example and standalone

## [1.0.1] - 2022-04-20

### Changed

- Replaced find_nucleus_server() with get_assets_root_path()

## [1.0.0] - 2022-04-13

### Added

- quadruped class, unitree class (supports both a1 and go1), unitree vision class (unitree class with stereo cameras), and unitree direct class (unitree class that subscribes to external controllers)
- quadruped controllers
- documentations and unit tests
- quadruped standalone with ros 1 and ros 2 vio examples
1,318
Markdown
21.355932
194
0.704856
kimsooyoung/legged_robotics/omni.isaac.quadruped/docs/index.rst
Quadruped Robots [omni.isaac.quadruped]
#######################################

Quadruped
======================

.. automodule:: omni.isaac.quadruped.quadruped
    :inherited-members:
    :members:
    :undoc-members:
    :exclude-members:

Quadruped Controller
=======================

.. automodule:: omni.isaac.quadruped.controllers
    :inherited-members:
    :imported-members:
    :members:
    :undoc-members:
    :exclude-members:

Quadruped Robots
======================

.. automodule:: omni.isaac.quadruped.robots
    :inherited-members:
    :imported-members:
    :members:
    :undoc-members:
    :exclude-members:

Quadruped Utilities
========================

.. automodule:: omni.isaac.quadruped.utils
    :inherited-members:
    :imported-members:
    :members:
    :undoc-members:
    :exclude-members:
829
reStructuredText
17.043478
48
0.576598
kimsooyoung/rb_issac_tutorial/data_test_ur.py
import h5py import numpy as np import pylab as plt file_name = "./ur_bin_filling.hdf5" # file_name = "./ur_bin_palleting.hdf5" with h5py.File(file_name, 'r') as f: print(f.keys()) print(f"keys: {f['isaac_dataset'].keys()}") print(f"sim_time: {f['isaac_dataset']['sim_time'].shape}") print(f"joint_positions: {f['isaac_dataset']['joint_positions'].shape}") print(f"joint_velocities: {f['isaac_dataset']['joint_velocities'].shape}") print(f"camera_images: {f['isaac_dataset']['camera_images'].keys()}") sim_time = f['isaac_dataset']['sim_time'][:] joint_positions = f['isaac_dataset']['joint_positions'][:] joint_velocities = f['isaac_dataset']['joint_velocities'][:] if file_name == "./ur_bin_filling.hdf5": ee_camera = f['isaac_dataset']['camera_images']['ee_camera'][:] side_camera = f['isaac_dataset']['camera_images']['side_camera'][:] front_camera = f['isaac_dataset']['camera_images']['front_camera'][:] plt.figure(1) plt.imshow(ee_camera[7]) plt.figure(2) plt.imshow(side_camera[7]) plt.figure(3) plt.imshow(front_camera[7]) elif file_name == "./ur_bin_palleting.hdf5": ee_camera = f['isaac_dataset']['camera_images']['ee_camera'][:] left_camera = f['isaac_dataset']['camera_images']['left_camera'][:] right_camera = f['isaac_dataset']['camera_images']['right_camera'][:] front_camera = f['isaac_dataset']['camera_images']['front_camera'][:] back_camera = f['isaac_dataset']['camera_images']['back_camera'][:] plt.figure(1) plt.imshow(ee_camera[1]) plt.figure(2) plt.imshow(left_camera[1]) plt.figure(3) plt.imshow(right_camera[1]) plt.figure(4) plt.imshow(front_camera[1]) plt.figure(5) plt.imshow(back_camera[1]) print(f"sim_time: {sim_time}") print(f"joint_positions[0]: {joint_positions[0]}") print(f"joint_velocities[0]: {joint_velocities[0]}") plt.show()
2,038
Python
30.36923
78
0.592738
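The inspection script above hard-codes the dataset keys for each log file. When the layout of a new HDF5 log is unknown, h5py's visititems can print the whole group/dataset tree instead; a small sketch using the same file path as the script:

import h5py

def print_tree(name, obj):
    # Called once for every group and dataset in the file by visititems
    if isinstance(obj, h5py.Dataset):
        print(f"{name}: shape={obj.shape}, dtype={obj.dtype}")
    else:
        print(f"{name}/")

with h5py.File("./ur_bin_filling.hdf5", "r") as f:
    f.visititems(print_tree)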
kimsooyoung/rb_issac_tutorial/data_test_franka.py
import h5py import numpy as np import pylab as plt file_name = "./franka_nuts_basic.hdf5" # file_name = "./franka_bolts_nuts_table.hdf5" with h5py.File(file_name, 'r') as f: print(f.keys()) print(f"keys: {f['isaac_dataset'].keys()}") print(f"sim_time: {f['isaac_dataset']['sim_time'].shape}") print(f"joint_positions: {f['isaac_dataset']['joint_positions'].shape}") print(f"joint_velocities: {f['isaac_dataset']['joint_velocities'].shape}") print(f"camera_images: {f['isaac_dataset']['camera_images'].keys()}") print(f"camera_images: {f['isaac_dataset']['camera_images']['hand_camera'].shape}") print(f"camera_images: {f['isaac_dataset']['camera_images']['top_camera'].shape}") print(f"camera_images: {f['isaac_dataset']['camera_images']['front_camera'].shape}") sim_time = f['isaac_dataset']['sim_time'][:] joint_positions = f['isaac_dataset']['joint_positions'][:] joint_velocities = f['isaac_dataset']['joint_velocities'][:] hand_camera = f['isaac_dataset']['camera_images']['hand_camera'][:] top_camera = f['isaac_dataset']['camera_images']['top_camera'][:] front_camera = f['isaac_dataset']['camera_images']['front_camera'][:] print(f"sim_time: {sim_time}") print(f"joint_positions[0]: {joint_positions[0]}") print(f"joint_velocities[0]: {joint_velocities[0]}") plt.figure(1) plt.imshow(hand_camera[7]) plt.figure(2) plt.imshow(top_camera[7]) plt.figure(3) plt.imshow(front_camera[7]) plt.show()
1,512
Python
34.186046
88
0.636905
kimsooyoung/rb_issac_tutorial/data_test.py
import h5py
import numpy as np
import pylab as plt

file_name = "/home/kimsooyoung/Documents/cam_test.hdf5"

with h5py.File(file_name, 'r') as f:
    # data = f['my_dataset'][:]
    print(f.keys())
    print(f['isaac_save_data'].keys())

    print(f['isaac_save_data']['image'].shape)
    print(f['isaac_save_data']['sim_time'].shape)

    print(type(f['isaac_save_data']['image'][0]))
    print(f['isaac_save_data']['sim_time'][0])

    plt.imshow(f['isaac_save_data']['image'][0])
    plt.show()
497
Python
25.210525
55
0.617706
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipLULA/ik_solver.py
from omni.isaac.motion_generation import ArticulationKinematicsSolver, LulaKinematicsSolver
from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root
from omni.isaac.core.articulations import Articulation
from typing import Optional
import carb


class KinematicsSolver(ArticulationKinematicsSolver):
    def __init__(self, robot_articulation: Articulation, end_effector_frame_name: Optional[str] = None) -> None:
        # TODO: change the config path
        # desktop
        # my_path = "/home/kimsooyoung/Documents/IsaacSim/rb_issac_tutorial/RoadBalanceEdu/ManipFollowTarget/"
        # self._urdf_path = "/home/kimsooyoung/Downloads/USD/cobotta_pro_900/"

        # laptop
        self._desc_path = "/home/kimsooyoung/Documents/IssacSimTutorials/rb_issac_tutorial/RoadBalanceEdu/ManipFollowTarget/"
        self._urdf_path = "/home/kimsooyoung/Downloads/Source/cobotta_pro_900/"

        self._kinematics = LulaKinematicsSolver(
            robot_description_path=self._desc_path + "rmpflow/robot_descriptor.yaml",
            urdf_path=self._urdf_path + "cobotta_pro_900.urdf"
        )
        if end_effector_frame_name is None:
            end_effector_frame_name = "onrobot_rg6_base_link"
        ArticulationKinematicsSolver.__init__(self, robot_articulation, self._kinematics, end_effector_frame_name)
        return
1,388
Python
45.299998
125
0.698127
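The KinematicsSolver wrapper above only binds a LulaKinematicsSolver to a specific articulation and end-effector frame; it does not show how it is consumed. A hedged sketch of the typical call pattern, runnable only inside Isaac Sim with the cobotta stage loaded (the prim path and target are placeholders taken from the samples further down):

import numpy as np
from omni.isaac.core.articulations import Articulation

# Placeholder prim path; the samples below add the robot under /World/cobotta
articulation = Articulation("/World/cobotta")
solver = KinematicsSolver(articulation)  # defaults to the onrobot_rg6_base_link frame

# The Lula solver needs the robot base pose before solving, as the later samples do
base_pos, base_rot = articulation.get_world_pose()
solver._kinematics.set_robot_base_pose(base_pos, base_rot)

action, success = solver.compute_inverse_kinematics(np.array([0.5, 0.0, 0.5]))
if success:
    articulation.get_articulation_controller().apply_action(action)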
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipLULA/extension.py
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import os from omni.isaac.examples.base_sample import BaseSampleExtension from .lula_kinematic_solver import LULAKinematicSolverExample from .franka_test import FrankaIK """ This file serves as a basic template for the standard boilerplate operations that make a UI-based extension appear on the toolbar. This implementation is meant to cover most use-cases without modification. Various callbacks are hooked up to a seperate class UIBuilder in .ui_builder.py Most users will be able to make their desired UI extension by interacting solely with UIBuilder. This class sets up standard useful callback functions in UIBuilder: on_menu_callback: Called when extension is opened on_timeline_event: Called when timeline is stopped, paused, or played on_physics_step: Called on every physics step on_stage_event: Called when stage is opened or closed cleanup: Called when resources such as physics subscriptions should be cleaned up build_ui: User function that creates the UI they want. """ class Extension(BaseSampleExtension): def on_startup(self, ext_id: str): super().on_startup(ext_id) super().start_extension( menu_name="RoadBalanceEdu", submenu_name="AddingNewManip", name="LULAKinematicSolver", title="LULAKinematicSolver", doc_link="https://docs.omniverse.nvidia.com/isaacsim/latest/core_api_tutorials/tutorial_core_hello_world.html", overview="This Example introduces the user on how to do cool stuff with Isaac Sim through scripting in asynchronous mode.", file_path=os.path.abspath(__file__), sample=LULAKinematicSolverExample(), # sample=FrankaIK(), ) return
2,173
Python
42.479999
135
0.743212
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipLULA/franka_test.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample # This extension has franka related tasks and controllers as well from omni.isaac.franka import Franka from omni.isaac.core.objects import DynamicCuboid from omni.isaac.franka.controllers import PickPlaceController from omni.isaac.franka.tasks import PickPlace from omni.isaac.core.tasks import BaseTask from omni.isaac.motion_generation import (ArticulationKinematicsSolver, ArticulationMotionPolicy, LulaKinematicsSolver, RmpFlow, interface_config_loader) import numpy as np class FrankaIK(BaseSample): def __init__(self) -> None: super().__init__() self._sim_step = 0 return def setup_scene(self): self._world = self.get_world() self._world.scene.add_default_ground_plane() self._franka = self._world.scene.add( Franka( prim_path="/World/Fancy_Franka", name="fancy_franka" )) return async def setup_post_load(self): self._world = self.get_world() self._franka = self._world.scene.get_object("fancy_franka") self._franka.gripper.set_joint_positions(self._franka.gripper.joint_opened_positions) # define LulaKinematicsSolver & ArticulationKinematicsSolver kinematics_config = interface_config_loader.load_supported_lula_kinematics_solver_config("Franka") self._kine_solver = LulaKinematicsSolver(**kinematics_config) self._art_kine_solver = ArticulationKinematicsSolver(self._franka, self._kine_solver, "right_gripper") # acquire controller for action applying self._articulation_controller = self._franka.get_articulation_controller() self._action = None self._world.add_physics_callback("sim_step", callback_fn=self.physics_step) await self._world.play_async() return async def setup_post_reset(self): await self._world.play_async() return def physics_step(self, step_size): self._sim_step += 1 if (self._sim_step > 100) and (self._action is None): target_pos = np.array([0.5, 0.0, 0.5]) robot_base_translation, robot_base_orientation = self._franka.get_world_pose() self._kine_solver.set_robot_base_pose(robot_base_translation, robot_base_orientation) self._action, ik_success = self._art_kine_solver.compute_inverse_kinematics( target_pos ) # Apply action if ik_success: print("IK Great") else: print("IK failed") elif (self._sim_step > 100) and (self._action is not None): self._franka.apply_action(self._action) return
3,304
Python
37.430232
110
0.64316
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipLULA/ui_builder.py
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import os from typing import List import omni.ui as ui from omni.isaac.ui.element_wrappers import ( Button, CheckBox, CollapsableFrame, ColorPicker, DropDown, FloatField, IntField, StateButton, StringField, TextBlock, XYPlot, ) from omni.isaac.ui.ui_utils import get_style class UIBuilder: def __init__(self): # Frames are sub-windows that can contain multiple UI elements self.frames = [] # UI elements created using a UIElementWrapper from omni.isaac.ui.element_wrappers self.wrapped_ui_elements = [] ################################################################################### # The Functions Below Are Called Automatically By extension.py ################################################################################### def on_menu_callback(self): """Callback for when the UI is opened from the toolbar. This is called directly after build_ui(). """ pass def on_timeline_event(self, event): """Callback for Timeline events (Play, Pause, Stop) Args: event (omni.timeline.TimelineEventType): Event Type """ pass def on_physics_step(self, step): """Callback for Physics Step. Physics steps only occur when the timeline is playing Args: step (float): Size of physics step """ pass def on_stage_event(self, event): """Callback for Stage Events Args: event (omni.usd.StageEventType): Event Type """ pass def cleanup(self): """ Called when the stage is closed or the extension is hot reloaded. Perform any necessary cleanup such as removing active callback functions Buttons imported from omni.isaac.ui.element_wrappers implement a cleanup function that should be called """ # None of the UI elements in this template actually have any internal state that needs to be cleaned up. # But it is best practice to call cleanup() on all wrapped UI elements to simplify development. for ui_elem in self.wrapped_ui_elements: ui_elem.cleanup() def build_ui(self): """ Build a custom UI tool to run your extension. This function will be called any time the UI window is closed and reopened. """ # Create a UI frame that prints the latest UI event. 
self._create_status_report_frame() # Create a UI frame demonstrating simple UI elements for user input self._create_simple_editable_fields_frame() # Create a UI frame with different button types self._create_buttons_frame() # Create a UI frame with different selection widgets self._create_selection_widgets_frame() # Create a UI frame with different plotting tools self._create_plotting_frame() def _create_status_report_frame(self): self._status_report_frame = CollapsableFrame("Status Report", collapsed=False) with self._status_report_frame: with ui.VStack(style=get_style(), spacing=5, height=0): self._status_report_field = TextBlock( "Last UI Event", num_lines=3, tooltip="Prints the latest change to this UI", include_copy_button=True, ) def _create_simple_editable_fields_frame(self): self._simple_fields_frame = CollapsableFrame("Simple Editable Fields", collapsed=False) with self._simple_fields_frame: with ui.VStack(style=get_style(), spacing=5, height=0): int_field = IntField( "Int Field", default_value=1, tooltip="Type an int or click and drag to set a new value.", lower_limit=-100, upper_limit=100, on_value_changed_fn=self._on_int_field_value_changed_fn, ) self.wrapped_ui_elements.append(int_field) float_field = FloatField( "Float Field", default_value=1.0, tooltip="Type a float or click and drag to set a new value.", step=0.5, format="%.2f", lower_limit=-100.0, upper_limit=100.0, on_value_changed_fn=self._on_float_field_value_changed_fn, ) self.wrapped_ui_elements.append(float_field) def is_usd_or_python_path(file_path: str): # Filter file paths shown in the file picker to only be USD or Python files _, ext = os.path.splitext(file_path.lower()) return ext == ".usd" or ext == ".py" string_field = StringField( "String Field", default_value="Type Here or Use File Picker on the Right", tooltip="Type a string or use the file picker to set a value", read_only=False, multiline_okay=False, on_value_changed_fn=self._on_string_field_value_changed_fn, use_folder_picker=True, item_filter_fn=is_usd_or_python_path, ) self.wrapped_ui_elements.append(string_field) def _create_buttons_frame(self): buttons_frame = CollapsableFrame("Buttons Frame", collapsed=False) with buttons_frame: with ui.VStack(style=get_style(), spacing=5, height=0): button = Button( "Button", "CLICK ME", tooltip="Click This Button to activate a callback function", on_click_fn=self._on_button_clicked_fn, ) self.wrapped_ui_elements.append(button) state_button = StateButton( "State Button", "State A", "State B", tooltip="Click this button to transition between two states", on_a_click_fn=self._on_state_btn_a_click_fn, on_b_click_fn=self._on_state_btn_b_click_fn, physics_callback_fn=None, # See Loaded Scenario Template for example usage ) self.wrapped_ui_elements.append(state_button) check_box = CheckBox( "Check Box", default_value=False, tooltip=" Click this checkbox to activate a callback function", on_click_fn=self._on_checkbox_click_fn, ) self.wrapped_ui_elements.append(check_box) def _create_selection_widgets_frame(self): self._selection_widgets_frame = CollapsableFrame("Selection Widgets", collapsed=False) with self._selection_widgets_frame: with ui.VStack(style=get_style(), spacing=5, height=0): def dropdown_populate_fn(): return ["Option A", "Option B", "Option C"] dropdown = DropDown( "Drop Down", tooltip=" Select an option from the DropDown", populate_fn=dropdown_populate_fn, on_selection_fn=self._on_dropdown_item_selection, ) self.wrapped_ui_elements.append(dropdown) dropdown.repopulate() # This does not happen automatically, 
and it triggers the on_selection_fn color_picker = ColorPicker( "Color Picker", default_value=[0.69, 0.61, 0.39, 1.0], tooltip="Select a Color", on_color_picked_fn=self._on_color_picked, ) self.wrapped_ui_elements.append(color_picker) def _create_plotting_frame(self): self._plotting_frame = CollapsableFrame("Plotting Tools", collapsed=False) with self._plotting_frame: with ui.VStack(style=get_style(), spacing=5, height=0): import numpy as np x = np.arange(-1, 6.01, 0.01) y = np.sin((x - 0.5) * np.pi) plot = XYPlot( "XY Plot", tooltip="Press mouse over the plot for data label", x_data=[x[:300], x[100:400], x[200:]], y_data=[y[:300], y[100:400], y[200:]], x_min=None, # Use default behavior to fit plotted data to entire frame x_max=None, y_min=-1.5, y_max=1.5, x_label="X [rad]", y_label="Y", plot_height=10, legends=["Line 1", "Line 2", "Line 3"], show_legend=True, plot_colors=[ [255, 0, 0], [0, 255, 0], [0, 100, 200], ], # List of [r,g,b] values; not necessary to specify ) ###################################################################################### # Functions Below This Point Are Callback Functions Attached to UI Element Wrappers ###################################################################################### def _on_int_field_value_changed_fn(self, new_value: int): status = f"Value was changed in int field to {new_value}" self._status_report_field.set_text(status) def _on_float_field_value_changed_fn(self, new_value: float): status = f"Value was changed in float field to {new_value}" self._status_report_field.set_text(status) def _on_string_field_value_changed_fn(self, new_value: str): status = f"Value was changed in string field to {new_value}" self._status_report_field.set_text(status) def _on_button_clicked_fn(self): status = "The Button was Clicked!" self._status_report_field.set_text(status) def _on_state_btn_a_click_fn(self): status = "State Button was Clicked in State A!" self._status_report_field.set_text(status) def _on_state_btn_b_click_fn(self): status = "State Button was Clicked in State B!" self._status_report_field.set_text(status) def _on_checkbox_click_fn(self, value: bool): status = f"CheckBox was set to {value}!" self._status_report_field.set_text(status) def _on_dropdown_item_selection(self, item: str): status = f"{item} was selected from DropDown" self._status_report_field.set_text(status) def _on_color_picked(self, color: List[float]): formatted_color = [float("%0.2f" % i) for i in color] status = f"RGBA Color {formatted_color} was picked in the ColorPicker" self._status_report_field.set_text(status)
11,487
Python
38.888889
112
0.542178
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipLULA/lula_kinematic_solver.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.types import ArticulationAction from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.manipulators.grippers import ParallelGripper from omni.isaac.core.articulations import Articulation from omni.isaac.manipulators import SingleManipulator import numpy as np import carb from omni.isaac.motion_generation import ArticulationKinematicsSolver, LulaKinematicsSolver, interface_config_loader class LULAKinematicSolverExample(BaseSample): def __init__(self) -> None: super().__init__() # robot usd path carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._robot_path = self._server_root + "/Projects/RBROS2/cobotta_pro_900/cobotta_pro_900/cobotta_pro_900.usd" # simulation step counter self._sim_step = 0 # robot joint default positions self._joints_default_positions = np.zeros(12) self._joints_default_positions[7] = 0.628 self._joints_default_positions[8] = 0.628 # Desired end effector pose self._target_pos = np.array([1.0, 0.0, 1.0]) self._target_rot = np.array([1.0, 0.0, 0.0, 0.0]) # wxyz quaternion # various solvers and controllers self._kine_solver = None self._articulation = None self._art_kine_solver = None self._articulation_controller = None return def setup_scene(self): self._world = self.get_world() self._world.scene.add_default_ground_plane() # add robot to the scene add_reference_to_stage(usd_path=self._robot_path, prim_path="/World/cobotta") self._articulation = Articulation("/World/cobotta") #define the gripper self._gripper = ParallelGripper( #We chose the following values while inspecting the articulation end_effector_prim_path="/World/cobotta/onrobot_rg6_base_link", joint_prim_names=["finger_joint", "right_outer_knuckle_joint"], joint_opened_positions=np.array([0, 0]), joint_closed_positions=np.array([0.628, -0.628]), action_deltas=np.array([-0.628, 0.628]), ) #define the manipulator self._my_denso = self._world.scene.add( SingleManipulator( prim_path="/World/cobotta", name="cobotta_robot", end_effector_prim_name="onrobot_rg6_base_link", gripper=self._gripper) ) self._my_denso.set_joints_default_state( positions=self._joints_default_positions ) return async def setup_post_load(self): self._world = self.get_world() self._my_denso = self._world.scene.get_object("cobotta_robot") # laptop path self._desc_path = "/home/kimsooyoung/Documents/IssacSimTutorials/rb_issac_tutorial/RoadBalanceEdu/ManipLULA/" self._urdf_path = "/home/kimsooyoung/Downloads/Source/cobotta_pro_900/" self._kine_solver = LulaKinematicsSolver( # robot_description_path=self._desc_path+"rmpflow/robot_descriptor_common.yaml", robot_description_path=self._desc_path+"rmpflow/robot_descriptor.yaml", urdf_path=self._urdf_path+"cobotta_pro_900.urdf", ) self._art_kine_solver = ArticulationKinematicsSolver(self._my_denso, self._kine_solver, 
"onrobot_rg6_base_link") self._articulation_controller = self._my_denso.get_articulation_controller() self._action = None self._world.add_physics_callback("sim_step", callback_fn=self.sim_step_cb) await self._world.play_async() return async def setup_post_reset(self): self._articulation_controller.reset() await self._world.play_async() return def sim_step_cb(self, step_size): self._sim_step += 1 if (self._sim_step > 100) and (self._action is None): target_pos = np.array([0.5, 0.0, 0.5]) robot_base_translation, robot_base_orientation = self._my_denso.get_world_pose() self._kine_solver.set_robot_base_pose(robot_base_translation, robot_base_orientation) self._action, ik_success = self._art_kine_solver.compute_inverse_kinematics( target_pos ) # Apply action if ik_success: print("IK Great") else: print("IK failed") elif (self._sim_step > 100) and (self._action is not None): self._articulation_controller.apply_action(self._action) return
5,364
Python
39.037313
120
0.644295
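The example stores a desired end-effector orientation (self._target_rot) but never passes it to the solver, so only position is constrained. If orientation should be tracked too, the call could include an orientation target; note that the target_orientation keyword below is the usual ArticulationKinematicsSolver argument and is an assumption here, since this file never uses it:

# Inside sim_step_cb, instead of the position-only call (sketch; assumes the
# target_orientation keyword of ArticulationKinematicsSolver.compute_inverse_kinematics):
self._action, ik_success = self._art_kine_solver.compute_inverse_kinematics(
    self._target_pos,                      # np.array([1.0, 0.0, 1.0]) from __init__
    target_orientation=self._target_rot,   # wxyz quaternion, also from __init__
)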
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipLULA/README.md
# Loading Extension

To enable this extension, run Isaac Sim with the flags --ext-folder {path_to_ext_folder} --enable {ext_directory_name}

The user will see the extension appear on the toolbar on startup with the title they specified in the Extension Generator.

# Extension Usage

This template provides example usage for a library of UIElementWrapper objects that help to quickly develop custom UI tools with minimal boilerplate code.

# Template Code Overview

The template is well documented and is meant to be self-explanatory to the user should they start reading the provided python files. A short overview is also provided here:

global_variables.py:
    A script that stores the global variables that the user specified when creating this extension, such as the Title and Description.

extension.py:
    A class containing the standard boilerplate necessary to have the user extension show up on the Toolbar. This class is meant to fulfill most use-cases without modification. In extension.py, useful standard callback functions are created that the user may complete in ui_builder.py.

ui_builder.py:
    This file is the user's main entrypoint into the template. Here, the user can see useful callback functions that have been set up for them, and they may also create UI buttons that are hooked up to more user-defined callback functions. This file is the most thoroughly documented, and the user should read through it before making serious modifications.
1,488
Markdown
58.559998
132
0.793011
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipLULA/rmpflow/robot_descriptor.yaml
api_version: 1.0

cspace:
    - joint_1
    - joint_2
    - joint_3
    - joint_4
    - joint_5
    - joint_6

root_link: world

default_q: [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ]

cspace_to_urdf_rules: []

composite_task_spaces: []
230
YAML
15.499999
38
0.56087
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipLULA/rmpflow/robot_descriptor_common.yaml
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # The robot descriptor defines the generalized coordinates and how to map those # to the underlying URDF dofs. api_version: 1.0 # Defines the generalized coordinates. Each generalized coordinate is assumed # to have an entry in the URDF. # RMPflow will only use these joints to control the robot position. cspace: - joint_1 - joint_2 - joint_3 - joint_4 - joint_5 - joint_6 # Global frame of the URDF root_link: world # The default cspace position of this robot default_q: [ 0.0,0.3,1.2,0.0,0.0,0.0 ] # RMPflow uses collision spheres to define the robot geometry in order to avoid # collisions with external obstacles. If no spheres are specified, RMPflow will # not be able to avoid obstacles. collision_spheres: - J1: - "center": [0.0, 0.0, 0.1] "radius": 0.08 - "center": [0.0, 0.0, 0.15] "radius": 0.08 - "center": [0.0, 0.0, 0.2] "radius": 0.08 - J2: - "center": [0.0, 0.08, 0.0] "radius": 0.08 - "center": [0.0, 0.16, 0.0] "radius": 0.08 - "center": [0.0, 0.175, 0.05] "radius": 0.065 - "center": [0.0, 0.175, 0.1] "radius": 0.065 - "center": [0.0, 0.175, 0.15] "radius": 0.065 - "center": [0.0, 0.175, 0.2] "radius": 0.065 - "center": [0.0, 0.175, 0.25] "radius": 0.065 - "center": [0.0, 0.175, 0.3] "radius": 0.065 - "center": [0.0, 0.175, 0.35] "radius": 0.065 - "center": [0.0, 0.175, 0.4] "radius": 0.065 - "center": [0.0, 0.175, 0.45] "radius": 0.065 - "center": [0.0, 0.175, 0.5] "radius": 0.065 - "center": [0.0, 0.1, 0.5] "radius": 0.07 - J3: - "center": [0.0, 0.025, 0] "radius": 0.065 - "center": [0.0, -0.025, 0] "radius": 0.065 - "center": [0.0, -0.025, 0.05] "radius": 0.065 - "center": [0.0, -0.025, 0.1] "radius": 0.065 - "center": [0.0, -0.025, 0.15] "radius": 0.06 - "center": [0.0, -0.025, 0.2] "radius": 0.06 - "center": [0.0, -0.025, 0.25] "radius": 0.06 - "center": [0.0, -0.025, 0.3] "radius": 0.06 - "center": [0.0, -0.025, 0.35] "radius": 0.055 - "center": [0.0, -0.025, 0.4] "radius": 0.055 - J5: - "center": [0.0, 0.05, 0] "radius": 0.055 - "center": [0.0, 0.1, 0] "radius": 0.055 - J6: - "center": [0.0, 0.0, -0.05] "radius": 0.05 - "center": [0.0, 0.0, -0.1] "radius": 0.05 - "center": [0.0, 0.0, -0.15] "radius": 0.05 - "center": [0.0, 0.0, 0.04] "radius": 0.035 - "center": [0.0, 0.0, 0.08] "radius": 0.035 - "center": [0.0, 0.0, 0.12] "radius": 0.035 - right_inner_knuckle: - "center": [0.0, 0.0, 0.0] "radius": 0.02 - "center": [0.0, -0.03, 0.025] "radius": 0.02 - "center": [0.0, -0.05, 0.05] "radius": 0.02 - right_inner_finger: - "center": [0.0, 0.02, 0.0] "radius": 0.015 - "center": [0.0, 0.02, 0.015] "radius": 0.015 - "center": [0.0, 0.02, 0.03] "radius": 0.015 - "center": [0.0, 0.025, 0.04] "radius": 0.01 - left_inner_knuckle: - "center": [0.0, 0.0, 0.0] "radius": 0.02 - "center": [0.0, -0.03, 0.025] "radius": 0.02 - "center": [0.0, -0.05, 0.05] "radius": 0.02 - left_inner_finger: - "center": [0.0, 0.02, 0.0] "radius": 0.015 - "center": [0.0, 0.02, 0.015] "radius": 0.015 - "center": [0.0, 0.02, 0.03] "radius": 0.015 - "center": [0.0, 0.025, 0.04] "radius": 0.01
3,988
YAML
26.701389
80
0.522818
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipLULA/tasks/pick_place.py
from omni.isaac.manipulators import SingleManipulator from omni.isaac.manipulators.grippers import ParallelGripper from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.stage import add_reference_to_stage import omni.isaac.core.tasks as tasks from typing import Optional import numpy as np import carb class PickPlace(tasks.PickPlace): def __init__( self, name: str = "denso_pick_place", cube_initial_position: Optional[np.ndarray] = None, cube_initial_orientation: Optional[np.ndarray] = None, target_position: Optional[np.ndarray] = None, offset: Optional[np.ndarray] = None, ) -> None: tasks.PickPlace.__init__( self, name=name, cube_initial_position=cube_initial_position, cube_initial_orientation=cube_initial_orientation, target_position=target_position, cube_size=np.array([0.0515, 0.0515, 0.0515]), offset=offset, ) carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._robot_path = self._server_root + "/Projects/RBROS2/cobotta_pro_900/cobotta_pro_900/cobotta_pro_900.usd" return def set_robot(self) -> SingleManipulator: #TODO: change the asset path here # laptop add_reference_to_stage(usd_path=self._robot_path, prim_path="/World/cobotta") gripper = ParallelGripper( end_effector_prim_path="/World/cobotta/onrobot_rg6_base_link", joint_prim_names=["finger_joint", "right_outer_knuckle_joint"], joint_opened_positions=np.array([0, 0]), joint_closed_positions=np.array([0.628, -0.628]), action_deltas=np.array([-0.2, 0.2]) ) manipulator = SingleManipulator( prim_path="/World/cobotta", name="cobotta_robot", end_effector_prim_name="onrobot_rg6_base_link", gripper=gripper ) joints_default_positions = np.zeros(12) joints_default_positions[7] = 0.628 joints_default_positions[8] = 0.628 manipulator.set_joints_default_state(positions=joints_default_positions) return manipulator
2,427
Python
36.937499
117
0.639061
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipLULA/controllers/pick_place.py
import omni.isaac.manipulators.controllers as manipulators_controllers from omni.isaac.manipulators.grippers import ParallelGripper from .rmpflow import RMPFlowController from omni.isaac.core.articulations import Articulation class PickPlaceController(manipulators_controllers.PickPlaceController): def __init__( self, name: str, gripper: ParallelGripper, robot_articulation: Articulation, events_dt=None ) -> None: if events_dt is None: #These values needs to be tuned in general, you checkout each event in execution and slow it down or speed #it up depends on how smooth the movments are events_dt = [0.005, 0.002, 1, 0.05, 0.0008, 0.005, 0.0008, 0.1, 0.0008, 0.008] manipulators_controllers.PickPlaceController.__init__( self, name=name, cspace_controller=RMPFlowController( name=name + "_cspace_controller", robot_articulation=robot_articulation ), gripper=gripper, events_dt=events_dt, #This value can be changed # start_picking_height=0.6 ) return
1,184
Python
38.499999
118
0.640203
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipLULA/controllers/rmpflow.py
import omni.isaac.motion_generation as mg from omni.isaac.core.articulations import Articulation class RMPFlowController(mg.MotionPolicyController): def __init__(self, name: str, robot_articulation: Articulation, physics_dt: float = 1.0 / 60.0) -> None: # TODO: chamge the follow paths # laptop self._desc_path = "/home/kimsooyoung/Documents/IssacSimTutorials/rb_issac_tutorial/RoadBalanceEdu/ManipFollowTarget/" self._urdf_path = "/home/kimsooyoung/Downloads/Source/cobotta_pro_900/" self.rmpflow = mg.lula.motion_policies.RmpFlow( robot_description_path=self._desc_path+"rmpflow/robot_descriptor.yaml", rmpflow_config_path=self._desc_path+"rmpflow/denso_rmpflow_common.yaml", urdf_path=self._urdf_path+"cobotta_pro_900.urdf", end_effector_frame_name="onrobot_rg6_base_link", maximum_substep_size=0.00334 ) self.articulation_rmp = mg.ArticulationMotionPolicy(robot_articulation, self.rmpflow, physics_dt) mg.MotionPolicyController.__init__(self, name=name, articulation_motion_policy=self.articulation_rmp) self._default_position, self._default_orientation = ( self._articulation_motion_policy._robot_articulation.get_world_pose() ) self._motion_policy.set_robot_base_pose( robot_position=self._default_position, robot_orientation=self._default_orientation ) return def reset(self): mg.MotionPolicyController.reset(self) self._motion_policy.set_robot_base_pose( robot_position=self._default_position, robot_orientation=self._default_orientation )
1,686
Python
45.86111
125
0.68446
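RMPFlowController only wires the Lula RmpFlow policy to the cobotta articulation; the PickPlaceController above consumes it internally as its cspace controller. A hedged sketch of driving it directly, where my_denso is a placeholder SingleManipulator and the forward keyword names follow the usual MotionPolicyController interface (an assumption, since this file never calls forward itself):

import numpy as np

# my_denso is a placeholder for the SingleManipulator created in the samples above
controller = RMPFlowController(name="cobotta_rmpflow", robot_articulation=my_denso)
action = controller.forward(
    target_end_effector_position=np.array([0.4, 0.0, 0.4]),
    target_end_effector_orientation=None,  # leave orientation unconstrained
)
my_denso.get_articulation_controller().apply_action(action)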
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/WheeledRobotLimoDiff/limo_diff_drive.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.wheeled_robots.robots import WheeledRobot
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.wheeled_robots.controllers import WheelBasePoseController
from omni.isaac.core.physics_context.physics_context import PhysicsContext
from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root
from omni.isaac.wheeled_robots.controllers.holonomic_controller import HolonomicController
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController

import numpy as np
import carb


class LimoDiffDrive(BaseSample):
    def __init__(self) -> None:
        super().__init__()

        carb.log_info("Check /persistent/isaac/asset_root/default setting")
        default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default")
        self._server_root = get_url_root(default_asset_root)

        # self._robot_path = self._server_root + "/Projects/RBROS2/WheeledRobot/limo_base.usd"
        self._robot_path = self._server_root + "/Projects/RBROS2/WheeledRobot/limo_diff_thin.usd"
        return

    def setup_scene(self):
        world = self.get_world()
        world.scene.add_default_ground_plane()
        add_reference_to_stage(usd_path=self._robot_path, prim_path="/World/Limo")

        # Reference : https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.wheeled_robots/docs/index.html?highlight=wheeledrobot#omni.isaac.wheeled_robots.robots.WheeledRobot
        self._wheeled_robot = world.scene.add(
            WheeledRobot(
                prim_path="/World/Limo/base_link",
                name="my_limo",
                # Caution: these are DOF joint names, not link names.
                wheel_dof_names=[
                    "front_left_wheel",
                    "front_right_wheel",
                    "rear_left_wheel",
                    "rear_right_wheel",
                ],
                create_robot=False,
                usd_path=self._robot_path,
                position=np.array([0, 0.0, 0.02]),
                orientation=np.array([1.0, 0.0, 0.0, 0.0]),
            )
        )

        self._save_count = 0

        self._scene = PhysicsContext()
        self._scene.set_physics_dt(1 / 30.0)
        return

    async def setup_post_load(self):
        self._world = self.get_world()

        # Reference : https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.wheeled_robots/docs/index.html?highlight=differentialcontroller
        self._diff_controller = DifferentialController(
            name="simple_control",
            wheel_radius=0.045,
            # Caution: for a 4WD (skid-steer) platform this value will not match the
            # physical wheelbase and usually needs tuning.
            # Reference : https://forums.developer.nvidia.com/t/how-to-drive-clearpath-jackal-via-ros2-messages-in-isaac-sim/275907/4
            wheel_base=0.43
        )
        self._diff_controller.reset()

        self._wheeled_robot.initialize()
        self._world.add_physics_callback("sending_actions", callback_fn=self.send_robot_actions)
        return

    def send_robot_actions(self, step_size):
        self._save_count += 1

        # linear X, angular Z commands
        if self._save_count >= 0 and self._save_count < 150:
            wheel_action = self._diff_controller.forward(command=[0.3, 0.0])
        elif self._save_count >= 150 and self._save_count < 300:
            wheel_action = self._diff_controller.forward(command=[-0.3, 0.0])
        elif self._save_count >= 300 and self._save_count < 450:
            wheel_action = self._diff_controller.forward(command=[0.0, 0.3])
        elif self._save_count >= 450 and self._save_count < 600:
            wheel_action = self._diff_controller.forward(command=[0.0, -0.3])
        else:
            # Stop for one step and restart the command sequence; a zero command
            # here avoids operating on a None action below.
            wheel_action = self._diff_controller.forward(command=[0.0, 0.0])
            self._save_count = 0

        # DifferentialController outputs two wheel velocities; duplicate them so
        # all four wheel joints receive a command.
        wheel_action.joint_velocities = np.hstack((wheel_action.joint_velocities, wheel_action.joint_velocities))
        self._wheeled_robot.apply_wheel_actions(wheel_action)
        return

    async def setup_pre_reset(self):
        if self._world.physics_callback_exists("sim_step"):
            self._world.remove_physics_callback("sim_step")
        self._save_count = 0
        self._world.pause()
        return

    async def setup_post_reset(self):
        self._diff_controller.reset()
        await self._world.play_async()
        self._world.pause()
        return

    def world_cleanup(self):
        self._world.pause()
        return
5,079
Python
39.64
196
0.6456
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/MirobotPickandPlaceROS2/pick_place_example.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.manipulators.grippers.surface_gripper import SurfaceGripper from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.utils.rotations import euler_angles_to_quat from omni.isaac.manipulators import SingleManipulator from omni.isaac.dynamic_control import _dynamic_control as dc from omni.isaac.core.prims import RigidPrim, GeometryPrim from pxr import Gf, Sdf, UsdGeom, UsdLux, UsdPhysics import omni.graph.core as og import numpy as np import omni import carb from .controllers.pick_place import PickPlaceController def createRigidBody(stage, bodyType, boxActorPath, mass, scale, position, rotation, color): p = Gf.Vec3f(position[0], position[1], position[2]) orientation = Gf.Quatf(rotation[0], rotation[1], rotation[2], rotation[3]) scale = Gf.Vec3f(scale[0], scale[1], scale[2]) bodyGeom = bodyType.Define(stage, boxActorPath) bodyPrim = stage.GetPrimAtPath(boxActorPath) bodyGeom.AddTranslateOp().Set(p) bodyGeom.AddOrientOp().Set(orientation) bodyGeom.AddScaleOp().Set(scale) bodyGeom.CreateDisplayColorAttr().Set([color]) UsdPhysics.CollisionAPI.Apply(bodyPrim) if mass > 0: massAPI = UsdPhysics.MassAPI.Apply(bodyPrim) massAPI.CreateMassAttr(mass) UsdPhysics.RigidBodyAPI.Apply(bodyPrim) UsdPhysics.CollisionAPI(bodyPrim) return bodyGeom class PickandPlaceExample(BaseSample): def __init__(self) -> None: super().__init__() self._gripper = None self._my_mirobot = None self._articulation_controller = None # simulation step counter self._sim_step = 0 self._target_position = np.array([0.2, -0.08, 0.06]) return def og_setup(self): domain_id = 30 try: # Clock OG og.Controller.edit( {"graph_path": "/ROS2Clock", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("publishClock", "omni.isaac.ros2_bridge.ROS2PublishClock"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", domain_id), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "publishClock.inputs:execIn"), ("readSimTime.outputs:simulationTime", "publishClock.inputs:timeStamp"), ("context.outputs:context", "publishClock.inputs:context"), ], }, ) # Joint Pub Sub og.Controller.edit( {"graph_path": "/ROS2JointPubSub", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("PublishJointState", "omni.isaac.ros2_bridge.ROS2PublishJointState"), ("SubscribeJointState", "omni.isaac.ros2_bridge.ROS2SubscribeJointState"), ("ArticulationController", "omni.isaac.core_nodes.IsaacArticulationController"), ("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ], og.Controller.Keys.CONNECT: [ ("OnPlaybackTick.outputs:tick", "PublishJointState.inputs:execIn"), ("OnPlaybackTick.outputs:tick", "SubscribeJointState.inputs:execIn"), ("OnPlaybackTick.outputs:tick", 
"ArticulationController.inputs:execIn"), ("ReadSimTime.outputs:simulationTime", "PublishJointState.inputs:timeStamp"), ("SubscribeJointState.outputs:jointNames", "ArticulationController.inputs:jointNames"), ("SubscribeJointState.outputs:positionCommand", "ArticulationController.inputs:positionCommand"), ("SubscribeJointState.outputs:velocityCommand", "ArticulationController.inputs:velocityCommand"), ("SubscribeJointState.outputs:effortCommand", "ArticulationController.inputs:effortCommand"), ], og.Controller.Keys.SET_VALUES: [ ("ArticulationController.inputs:usePath", True), ("ArticulationController.inputs:robotPath", "/World/mirobot"), ("PublishJointState.inputs:targetPrim", "/World/mirobot"), ("SubscribeJointState.inputs:topicName", "/issac/joint_command"), ("PublishJointState.inputs:topicName", "/issac/joint_states"), ], }, ) except Exception as e: print(e) def setup_robot(self): carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._robot_path = self._server_root + "/Projects/RBROS2/mirobot_ros2/mirobot_description/urdf/mirobot_urdf_2/mirobot_urdf_2_ee.usd" add_reference_to_stage(usd_path=self._robot_path, prim_path="/World/mirobot") # define the gripper self._gripper = SurfaceGripper( end_effector_prim_path="/World/mirobot/ee_link", translate=0.02947, direction="x", # kp=1.0e4, # kd=1.0e3, # disable_gravity=False, ) self._gripper.set_force_limit(value=1.0e2) self._gripper.set_torque_limit(value=1.0e3) # define the manipulator self._my_mirobot = self._world.scene.add( SingleManipulator( prim_path="/World/mirobot", name="mirobot", end_effector_prim_name="ee_link", gripper=self._gripper ) ) self._joints_default_positions = np.zeros(6) self._my_mirobot.set_joints_default_state(positions=self._joints_default_positions) def setup_bin(self): self._nucleus_server = get_assets_root_path() table_path = self._nucleus_server + "/Isaac/Props/KLT_Bin/small_KLT.usd" add_reference_to_stage(usd_path=table_path, prim_path=f"/World/bin") self._bin_initial_position = np.array([0.2, 0.08, 0.06]) / get_stage_units() self._packing_bin = self._world.scene.add( GeometryPrim( prim_path="/World/bin", name=f"packing_bin", position=self._bin_initial_position, orientation=euler_angles_to_quat(np.array([np.pi, 0, 0])), scale=np.array([0.25, 0.25, 0.25]), collision=True ) ) self._packing_bin_geom = self._world.scene.get_object(f"packing_bin") massAPI = UsdPhysics.MassAPI.Apply(self._packing_bin_geom.prim.GetPrim()) massAPI.CreateMassAttr().Set(0.001) def setup_box(self): # Box to be picked self.box_start_pose = dc.Transform([0.2, 0.08, 0.06], [1, 0, 0, 0]) self._stage = omni.usd.get_context().get_stage() self._boxGeom = createRigidBody( self._stage, UsdGeom.Cube, "/World/Box", 0.0010, [0.015, 0.015, 0.015], self.box_start_pose.p, self.box_start_pose.r, [0.2, 0.2, 1] ) def setup_scene(self): self._world = self.get_world() self._world.scene.add_default_ground_plane() self.setup_robot() # self.setup_bin() self.setup_box() self.og_setup() return async def setup_post_load(self): self._world = self.get_world() self._my_controller = PickPlaceController( name="controller", gripper=self._gripper, robot_articulation=self._my_mirobot, events_dt=[ 0.008, 0.005, 0.1, 0.1, 0.0025, 0.5, 0.0025, 0.1, 0.008, 0.08 ], ) self._articulation_controller = self._my_mirobot.get_articulation_controller() self._world.add_physics_callback("sim_step", callback_fn=self.sim_step_cb) 
return async def setup_post_reset(self): self._my_controller.reset() await self._world.play_async() return def sim_step_cb(self, step_size): # # bin case # bin_pose, _ = self._packing_bin_geom.get_world_pose() # pick_position = bin_pose # place_position = self._target_position # box case box_matrix = omni.usd.get_world_transform_matrix(self._boxGeom) box_trans = box_matrix.ExtractTranslation() pick_position = np.array(box_trans) place_position = self._target_position joints_state = self._my_mirobot.get_joints_state() actions = self._my_controller.forward( picking_position=pick_position, placing_position=place_position, current_joint_positions=joints_state.positions, # This offset needs tuning as well end_effector_offset=np.array([0, 0, 0.02947+0.02]), end_effector_orientation=euler_angles_to_quat(np.array([0, 0, 0])), ) if self._my_controller.is_done(): print("done picking and placing") self._articulation_controller.apply_action(actions) return
10,546
Python
39.102661
140
0.583634
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/MirobotPickandPlaceROS2/rmpflow/robot_descriptor.yaml
api_version: 1.0

cspace:
    - joint1
    - joint2
    - joint3
    - joint4
    - joint5
    - joint6

root_link: world

default_q: [
    0.00, 0.00, 0.00, 0.00, 0.00, 0.00
]

cspace_to_urdf_rules: []

composite_task_spaces: []
224
YAML
15.071427
38
0.575893
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/MirobotPickandPlaceROS2/controllers/pick_place.py
from omni.isaac.manipulators.grippers.surface_gripper import SurfaceGripper
import omni.isaac.manipulators.controllers as manipulators_controllers
from .rmpflow import RMPFlowController
from omni.isaac.core.articulations import Articulation

# - Phase 0: Move end_effector above the cube center at the 'end_effector_initial_height'.
# - Phase 1: Lower end_effector down to encircle the target cube.
# - Phase 2: Wait for the robot's inertia to settle.
# - Phase 3: Close the grip.
# - Phase 4: Move end_effector up again, keeping the grip tight (lifting the block).
# - Phase 5: Smoothly move the end_effector toward the goal xy, keeping the height constant.
# - Phase 6: Move end_effector vertically toward the goal height at the 'end_effector_initial_height'.
# - Phase 7: Loosen the grip.
# - Phase 8: Move end_effector vertically up again at the 'end_effector_initial_height'.
# - Phase 9: Move end_effector towards the old xy position.


class PickPlaceController(manipulators_controllers.PickPlaceController):
    def __init__(
        self,
        name: str,
        gripper: SurfaceGripper,
        robot_articulation: Articulation,
        events_dt=None
    ) -> None:
        if events_dt is None:
            # These values generally need tuning: watch each phase during execution and
            # slow it down or speed it up depending on how smooth the movements are.
            events_dt = [0.005, 0.002, 1, 0.05, 0.0008, 0.005, 0.0008, 0.1, 0.0008, 0.008]
        manipulators_controllers.PickPlaceController.__init__(
            self,
            name=name,
            cspace_controller=RMPFlowController(
                name=name + "_cspace_controller", robot_articulation=robot_articulation
            ),
            gripper=gripper,
            events_dt=events_dt,
            end_effector_initial_height=0.05,  # This value can be changed
            # start_picking_height=0.6
        )
        return
1,930
Python
44.976189
118
0.674611
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/HelloRobot/hello_robot.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.wheeled_robots.robots import WheeledRobot from omni.isaac.core.utils.types import ArticulationAction import numpy as np class HelloRobot(BaseSample): def __init__(self) -> None: super().__init__() return def setup_scene(self): world = self.get_world() world.scene.add_default_ground_plane() assets_root_path = get_assets_root_path() jetbot_asset_path = assets_root_path + "/Isaac/Robots/Jetbot/jetbot.usd" self._jetbot = world.scene.add( WheeledRobot( prim_path="/World/Fancy_Robot", name="fancy_robot", wheel_dof_names=["left_wheel_joint", "right_wheel_joint"], create_robot=True, usd_path=jetbot_asset_path, ) ) return async def setup_post_load(self): self._world = self.get_world() self._jetbot = self._world.scene.get_object("fancy_robot") self._world.add_physics_callback("sending_actions", callback_fn=self.send_robot_actions) return def send_robot_actions(self, step_size): self._jetbot.apply_wheel_actions(ArticulationAction(joint_positions=None, joint_efforts=None, joint_velocities=5 * np.random.rand(2,))) return
1,954
Python
39.729166
101
0.634084
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipGripperControl/gripper_control.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.types import ArticulationAction from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.manipulators.grippers import ParallelGripper from omni.isaac.manipulators import SingleManipulator import numpy as np import carb class GripperControl(BaseSample): def __init__(self) -> None: super().__init__() carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._robot_path = self._server_root + "/Projects/RBROS2/cobotta_pro_900/cobotta_pro_900/cobotta_pro_900.usd" self._joints_default_positions = np.zeros(12) self._joints_default_positions[7] = 0.628 self._joints_default_positions[8] = 0.628 # simulation step counter self._sim_step = 0 return def setup_scene(self): world = self.get_world() world.scene.add_default_ground_plane() # add robot to the scene add_reference_to_stage(usd_path=self._robot_path, prim_path="/World/cobotta") #define the gripper self._gripper = ParallelGripper( #We chose the following values while inspecting the articulation end_effector_prim_path="/World/cobotta/onrobot_rg6_base_link", joint_prim_names=["finger_joint", "right_outer_knuckle_joint"], joint_opened_positions=np.array([0, 0]), joint_closed_positions=np.array([0.628, -0.628]), action_deltas=np.array([-0.628, 0.628]), ) #define the manipulator self._my_denso = self._world.scene.add( SingleManipulator( prim_path="/World/cobotta", name="cobotta_robot", end_effector_prim_name="onrobot_rg6_base_link", gripper=self._gripper) ) self._my_denso.set_joints_default_state( positions=self._joints_default_positions ) return async def setup_post_load(self): self._world = self.get_world() self._world.add_physics_callback("sending_actions", callback_fn=self.send_robot_actions) return def send_robot_actions(self, step_size): self._sim_step += 1 gripper_positions = self._my_denso.gripper.get_joint_positions() if self._sim_step < 500: #close the gripper slowly self._my_denso.gripper.apply_action( ArticulationAction( joint_positions=[ gripper_positions[0] + 0.1, gripper_positions[1] - 0.1 ] )) if self._sim_step > 500: #open the gripper slowly self._my_denso.gripper.apply_action( ArticulationAction( joint_positions=[ gripper_positions[0] - 0.1, gripper_positions[1] + 0.1 ] )) if self._sim_step == 1000: self._sim_step = 0 return
3,791
Python
35.461538
117
0.608283
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorFactory/garage_conveyor.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.franka.controllers import PickPlaceController from omni.isaac.examples.base_sample import BaseSample import numpy as np from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.isaac.core.prims.geometry_prim import GeometryPrim # Note: checkout the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html from pxr import Sdf, UsdLux, Gf from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.rotations import euler_angles_to_quat from omni.isaac.core import SimulationContext import omni.replicator.core as rep import carb import omni from os.path import expanduser import datetime now = datetime.datetime.now() PROPS = { 'spam' : "/Isaac/Props/YCB/Axis_Aligned/010_potted_meat_can.usd", 'jelly' : "/Isaac/Props/YCB/Axis_Aligned/009_gelatin_box.usd", 'tuna' : "/Isaac/Props/YCB/Axis_Aligned/007_tuna_fish_can.usd", 'cleanser' : "/Isaac/Props/YCB/Axis_Aligned/021_bleach_cleanser.usd", 'tomato_soup' : "/Isaac/Props/YCB/Axis_Aligned/005_tomato_soup_can.usd" } class GarageConveyor(BaseSample): def __init__(self) -> None: super().__init__() carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._nucleus_server = get_assets_root_path() # Enable scripts carb.settings.get_settings().set_bool("/app/omni.graph.scriptnode/opt_in", True) # Disable capture on play and async rendering carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False) carb.settings.get_settings().set("/omni/replicator/asyncRendering", False) carb.settings.get_settings().set("/app/asyncRendering", False) # Replicator Writerdir now_str = now.strftime("%Y-%m-%d_%H:%M:%S") self._out_dir = str(expanduser("~") + "/Documents/grocery_data_" + now_str) self._franka_position = np.array([-0.8064, 1.3602, 0.0]) # (w, x, y, z) self._franka_rotation = np.array([0.0, 0.0, 0.0, 1.0]) self._table_scale = 0.01 self._table_height = 0.0 self._table_position = np.array([-0.7, 1.8, 0.007]) # Gf.Vec3f(0.5, 0.0, 0.0) self._bin_path = self._nucleus_server + "/Isaac/Props/KLT_Bin/small_KLT_visual.usd" self._bin_scale = np.array([2.0, 2.0, 1.0]) self._test_bin_position = np.array([-1.75, 1.2, 0.85]) self._test_bin_orientation = np.array([0.7071068, 0, 0, 0.7071068]) self._bin1_position = np.array([-0.5, 2.1, 0.90797]) self._bin2_position = np.array([-0.5, 1.6, 0.90797]) self._plane_scale = np.array([0.4, 0.24, 1.0]) self._plane_position = np.array([-1.75, 1.2, 0.9]) self._plane_rotation = np.array([0.0, 0.0, 0.0]) return def add_background(self): self._world = self.get_world() bg_path = self._server_root + "/Projects/RBROS2/ConveyorGarage/Garage_wo_Conv_OG.usd" add_reference_to_stage(usd_path=bg_path, prim_path=f"/World/Garage") def add_training_bin(self): add_reference_to_stage(usd_path=self._bin_path, prim_path="/World/training_bin") 
self._world.scene.add(GeometryPrim(prim_path="/World/training_bin", name=f"training_bin_ref_geom", collision=True)) self._bin1_ref_geom = self._world.scene.get_object(f"training_bin_ref_geom") self._bin1_ref_geom.set_local_scale(np.array([self._bin_scale])) self._bin1_ref_geom.set_world_pose( position=self._test_bin_position, orientation=self._test_bin_orientation ) self._bin1_ref_geom.set_default_state( position=self._test_bin_position, orientation=self._test_bin_orientation ) def add_light(self): stage = omni.usd.get_context().get_stage() distantLight = UsdLux.CylinderLight.Define(stage, Sdf.Path("/World/cylinderLight")) distantLight.CreateIntensityAttr(60000) distantLight.AddTranslateOp().Set(Gf.Vec3f(-1.2, 0.9, 3.0)) distantLight.AddScaleOp().Set((0.1, 4.0, 0.1)) distantLight.AddRotateXYZOp().Set((0, 0, 90)) def random_props(self, file_name, class_name, max_number=3, one_in_n_chance=4): file_name = self._nucleus_server + file_name instances = rep.randomizer.instantiate(file_name, size=max_number, mode='scene_instance') with instances: rep.physics.collider() rep.modify.semantics([('class', class_name)]) rep.randomizer.scatter_2d(self.plane, check_for_collisions=True) rep.modify.pose( rotation=rep.distribution.uniform((-180,-180, -180), (180, 180, 180)), ) visibility_dist = [True] + [False]*(one_in_n_chance) rep.modify.visibility(rep.distribution.choice(visibility_dist)) def add_replicator(self): self.cam = rep.create.camera( position=(-1.75, 1.2, 2.0), # rotation=(-90, 0, 0), look_at=(-1.75, 1.2, 0.8) ) # self.rp = rep.create.render_product(self.cam, resolution=(1024, 1024)) self.rp = rep.create.render_product(self.cam, resolution=(1280, 720)) self.plane = rep.create.plane( scale=self._plane_scale, position=self._plane_position, rotation=self._plane_rotation, visible=False ) rep.randomizer.register(self.random_props) return def setup_scene(self): self._world = self.get_world() self._stage = omni.usd.get_context().get_stage() self.simulation_context = SimulationContext() self.add_background() self.add_light() self.add_training_bin() self.add_replicator() self._scene = PhysicsContext() self._scene.set_physics_dt(1 / 30.0) return async def setup_post_load(self): self._world = self.get_world() self._world.scene.enable_bounding_boxes_computations() with rep.trigger.on_frame(num_frames=50, interval=2): for n, f in PROPS.items(): self.random_props(f, n) # Create a writer and apply the augmentations to its corresponding annotators self._writer = rep.WriterRegistry.get("BasicWriter") print(f"Writing data to: {self._out_dir}") self._writer.initialize( output_dir=self._out_dir, rgb=True, bounding_box_2d_tight=True, ) # Attach render product to writer self._writer.attach([self.rp]) return async def setup_post_reset(self): await self._world.play_async() return def world_cleanup(self): self._world.pause() return
7,479
Python
37.556701
123
0.63578
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorFactory/inference/model_info.py
import tritonclient.grpc as grpcclient

inference_server_url = "localhost:8003"
triton_client = grpcclient.InferenceServerClient(url=inference_server_url)

# find out info about the model
model_name = "our_new_model"
config_info = triton_client.get_model_config(model_name)
print(config_info)
290
Python
25.454543
74
0.793103
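The config query above assumes the Triton server is already up and the model has loaded; a minimal pre-flight sketch (assuming the same "localhost:8003" endpoint and "our_new_model" name used above) might look like this:

import tritonclient.grpc as grpcclient

# Hypothetical readiness check before asking for the model config/metadata.
triton_client = grpcclient.InferenceServerClient(url="localhost:8003")
if triton_client.is_server_live() and triton_client.is_server_ready():
    # is_model_ready() returns False while the model is still loading or if it failed to load.
    if triton_client.is_model_ready("our_new_model"):
        print(triton_client.get_model_metadata("our_new_model"))
    else:
        print("model is not ready yet")
else:
    print("server is not reachable or not ready")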
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorFactory/inference/inference.py
from tritonclient.utils import triton_to_np_dtype import tritonclient.grpc as grpcclient import cv2 import numpy as np from matplotlib import pyplot as plt inference_server_url = "localhost:8003" triton_client = grpcclient.InferenceServerClient(url=inference_server_url) model_name = "our_new_model" # load image data target_width, target_height = 1280, 720 image_bgr = cv2.imread("rgb_0055.png") # image_bgr = cv2.imread("rgb_0061.png") # image_bgr = cv2.imread("rgb_0083.png") image_bgr = cv2.resize(image_bgr, (target_width, target_height)) image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB) image = np.float32(image_rgb) # preprocessing image = image/255 image = np.moveaxis(image, -1, 0) # HWC to CHW image = image[np.newaxis, :] # add batch dimension image = np.float32(image) plt.imshow(image_rgb) # create input input_name = "input" inputs = [grpcclient.InferInput(input_name, image.shape, "FP32")] inputs[0].set_data_from_numpy(image) output_names = ["boxes", "labels", "scores"] outputs = [grpcclient.InferRequestedOutput(n) for n in output_names] results = triton_client.infer(model_name, inputs, outputs=outputs) boxes, labels, scores = [results.as_numpy(o) for o in output_names] # annotate annotated_image = image_bgr.copy() props_dict = { 0: 'klt_bin', 1: 'tomato_soup', 2: 'tuna', 3: 'spam', 4: 'jelly', 5: 'cleanser', } if boxes.size > 0: # ensure something is found for box, lab, scr in zip(boxes, labels, scores): if scr > 0.4: box_top_left = int(box[0]), int(box[1]) box_bottom_right = int(box[2]), int(box[3]) text_origin = int(box[0]), int(box[3]) border_color = list(np.random.random(size=3) * 256) text_color = (255, 255, 255) font_scale = 0.9 thickness = 1 # bounding box2 img = cv2.rectangle( annotated_image, box_top_left, box_bottom_right, border_color, thickness=5, lineType=cv2.LINE_8 ) print(f"index: {lab}, label: {props_dict[lab]}, score: {scr:.2f}") # For the text background # Finds space required by the text so that we can put a background with that amount of width. (w, h), _ = cv2.getTextSize( props_dict[lab], cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1 ) # Prints the text. img = cv2.rectangle( img, (box_top_left[0], box_top_left[1] - 20), (box_top_left[0] + w, box_top_left[1]), border_color, -1 ) img = cv2.putText( img, props_dict[lab], box_top_left, cv2.FONT_HERSHEY_SIMPLEX, 0.6, text_color, 1 ) plt.imshow(cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)) plt.show()
2,945
Python
28.757575
105
0.582343
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorFactory/export/model_export.py
import os
import torch
import torchvision

import warnings
warnings.filterwarnings("ignore")

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# load the PyTorch model.
pytorch_dir = "/home/kimsooyoung/Documents/model.pth"
model = torch.load(pytorch_dir).cuda()

# Export Model
width = 1280
height = 720
dummy_input = torch.rand(1, 3, height, width).cuda()

torch.onnx.export(
    model,
    dummy_input,
    "model.onnx",
    opset_version=11,
    input_names=["input"],
    output_names=["boxes", "labels", "scores"]
)
556
Python
20.423076
83
0.69964
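The export script above does not verify the resulting file; a small sanity-check sketch (assuming onnx and onnxruntime are installed and "model.onnx" was produced by the script above) could be:

import numpy as np
import onnx
import onnxruntime as ort

# Structural check of the exported graph.
onnx_model = onnx.load("model.onnx")
onnx.checker.check_model(onnx_model)

# Run one dummy image through the exported model to confirm the input/output names and shapes.
session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
dummy = np.random.rand(1, 3, 720, 1280).astype(np.float32)
outputs = session.run(["boxes", "labels", "scores"], {"input": dummy})
print([o.shape for o in outputs])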
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorFactory/viz/data_visualize.py
import os import json import hashlib from PIL import Image import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as patches # # Desktop # data_dir = "/home/kimsooyoung/Documents/grocery_data_2024-05-21_18:52:00" # Laptop data_dir = "/home/kimsooyoung/Documents/grocery_data_2024-05-23_16:43:00" out_dir = "/home/kimsooyoung/Documents" number = "0025" # Write Visualization Functions # data_to_colour # takes in our data from a specific label ID and maps it to the proper color for the bounding box. def data_to_colour(data): if isinstance(data, str): data = bytes(data, "utf-8") else: data = bytes(data) m = hashlib.sha256() m.update(data) key = int(m.hexdigest()[:8], 16) r = ((((key >> 0) & 0xFF) + 1) * 33) % 255 g = ((((key >> 8) & 0xFF) + 1) * 33) % 255 b = ((((key >> 16) & 0xFF) + 1) * 33) % 255 inv_norm_i = 128 * (3.0 / (r + g + b)) return (int(r * inv_norm_i) / 255, int(g * inv_norm_i) / 255, int(b * inv_norm_i) / 255) # colorize_bbox_2d # takes in the path to the RGB image for the background, # the bounding box data, the labels, and the path to store the visualization. # It outputs a colorized bounding box. def colorize_bbox_2d(rgb_path, data, id_to_labels, file_path): rgb_img = Image.open(rgb_path) colors = [data_to_colour(bbox["semanticId"]) for bbox in data] fig, ax = plt.subplots(figsize=(10, 10)) ax.imshow(rgb_img) for bbox_2d, color, index in zip(data, colors, id_to_labels.keys()): labels = id_to_labels[str(index)] rect = patches.Rectangle( xy=(bbox_2d["x_min"], bbox_2d["y_min"]), width=bbox_2d["x_max"] - bbox_2d["x_min"], height=bbox_2d["y_max"] - bbox_2d["y_min"], edgecolor=color, linewidth=2, label=labels, fill=False, ) ax.add_patch(rect) plt.legend(loc="upper left") plt.savefig(file_path) # Load Synthetic Data and Visualize rgb_path = data_dir rgb = "rgb_"+number+".png" rgb_path = os.path.join(rgb_path, rgb) import os print(os.path.abspath(".")) # load the bounding box data. npy_path = data_dir bbox2d_tight_file_name = "bounding_box_2d_tight_"+number+".npy" data = np.load(os.path.join(npy_path, bbox2d_tight_file_name)) # load the labels corresponding to the image. json_path = data_dir bbox2d_tight_labels_file_name = "bounding_box_2d_tight_labels_"+number+".json" bbox2d_tight_id_to_labels = None with open(os.path.join(json_path, bbox2d_tight_labels_file_name), "r") as json_data: bbox2d_tight_id_to_labels = json.load(json_data) # Finally, we can call our function and see the labeled image! colorize_bbox_2d(rgb_path, data, bbox2d_tight_id_to_labels, os.path.join(out_dir, "bbox2d_tight.png"))
2,789
Python
31.823529
102
0.642524
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorFactory/train/fast_rcnn_train.py
from PIL import Image import os import numpy as np import torch import torch.utils.data import torchvision from torchvision.models.detection.faster_rcnn import FastRCNNPredictor from torchvision import transforms as T import json import shutil epochs = 15 num_classes = 6 data_dir = "/home/kimsooyoung/Documents/grocery_data_2024-05-23_16:43:00" output_file = "/home/kimsooyoung/Documents/model.pth" device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') print(f"Using device: {device}") class GroceryDataset(torch.utils.data.Dataset): # This function is run once when instantiating the Dataset object def __init__(self, root, transforms): self.root = root self.transforms = transforms # In the first portion of this code we are taking our single dataset folder # and splitting it into three folders based on the file types. # This is just a preprocessing step. list_ = os.listdir(root) for file_ in list_: name, ext = os.path.splitext(file_) ext = ext[1:] if ext == '': continue if os.path.exists(root+ '/' + ext): shutil.move(root+'/'+file_, root+'/'+ext+'/'+file_) else: os.makedirs(root+'/'+ext) shutil.move(root+'/'+file_, root+'/'+ext+'/'+file_) self.imgs = list(sorted(os.listdir(os.path.join(root, "png")))) self.label = list(sorted(os.listdir(os.path.join(root, "json")))) self.box = list(sorted(os.listdir(os.path.join(root, "npy")))) # We have our three attributes with the img, label, and box data # Loads and returns a sample from the dataset at the given index idx def __getitem__(self, idx): img_path = os.path.join(self.root, "png", self.imgs[idx]) img = Image.open(img_path).convert("RGB") label_path = os.path.join(self.root, "json", self.label[idx]) with open(os.path.join('root', label_path), "r") as json_data: json_labels = json.load(json_data) box_path = os.path.join(self.root, "npy", self.box[idx]) dat = np.load(str(box_path)) boxes = [] labels = [] for i in dat: obj_val = i[0] xmin = torch.as_tensor(np.min(i[1]), dtype=torch.float32) xmax = torch.as_tensor(np.max(i[3]), dtype=torch.float32) ymin = torch.as_tensor(np.min(i[2]), dtype=torch.float32) ymax = torch.as_tensor(np.max(i[4]), dtype=torch.float32) if (ymax > ymin) & (xmax > xmin): boxes.append([xmin, ymin, xmax, ymax]) area = (xmax - xmin) * (ymax - ymin) labels += [json_labels.get(str(obj_val)).get('class')] label_dict = {} # Labels for the dataset static_labels = { 'klt_bin' : 0, 'tomato_soup' : 1, 'tuna' : 2, 'spam' : 3, 'jelly' : 4, 'cleanser' : 5 } labels_out = [] # Transforming the input labels into a static label dictionary to use for i in range(len(labels)): label_dict[i] = labels[i] for i in label_dict: fruit = label_dict[i] final_fruit_label = static_labels[fruit] labels_out += [final_fruit_label] target = {} target["boxes"] = torch.as_tensor(boxes, dtype=torch.float32) target["labels"] = torch.as_tensor(labels_out, dtype=torch.int64) target["image_id"] = torch.tensor([idx]) target["area"] = area if self.transforms is not None: img= self.transforms(img) return img, target # Finally we have a function for the number of samples in our dataset def __len__(self): return len(self.imgs) # Create Helper Functions # converting to `Tensor` objects and also converting the `dtypes`. def get_transform(train): transforms = [] transforms.append(T.PILToTensor()) transforms.append(T.ConvertImageDtype(torch.float)) return T.Compose(transforms) # Create a function to collate our samples. 
def collate_fn(batch): return tuple(zip(*batch)) # Create Model and Train # We are starting with the pretrained (default weights) object detection # fasterrcnn_resnet50 model from Torchvision. def create_model(num_classes): model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights='DEFAULT') in_features = model.roi_heads.box_predictor.cls_score.in_features model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) return model # create our dataset by using our custom GroceryDataset class # This is then passed into our DataLoader. dataset = GroceryDataset(data_dir, get_transform(train=True)) data_loader = torch.utils.data.DataLoader( dataset, # batch_size=16, batch_size=8, shuffle=True, collate_fn=collate_fn ) # create our model with the N classes # And then transfer it to the GPU for training. model = create_model(num_classes) model.to(device) params = [p for p in model.parameters() if p.requires_grad] optimizer = torch.optim.SGD(params, lr=0.001) len_dataloader = len(data_loader) # Now we can actually train our model. # Keep track of our loss and print it out as we train. model.train() ep = 0 for epoch in range(epochs): optimizer.zero_grad() ep += 1 i = 0 for imgs, annotations in data_loader: i += 1 imgs = list(img.to(device) for img in imgs) annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations] loss_dict = model(imgs, annotations) losses = sum(loss for loss in loss_dict.values()) losses.backward() optimizer.step() print(f'Epoch: {ep} Iteration: {i}/{len_dataloader}, Loss: {losses}') torch.save(model, output_file)
5,909
Python
33.360465
84
0.616856
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/SimpleRobotFollowTarget/follow_target_example.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.types import ArticulationAction from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.manipulators.grippers import ParallelGripper from omni.isaac.manipulators import SingleManipulator import omni.isaac.core.tasks as tasks from typing import Optional import numpy as np import carb from .ik_solver import KinematicsSolver from .controllers.rmpflow import RMPFlowController # Inheriting from the base class Follow Target class FollowTarget(tasks.FollowTarget): def __init__( self, name: str = "mirobot_follow_target", target_prim_path: Optional[str] = None, target_name: Optional[str] = None, target_position: Optional[np.ndarray] = None, target_orientation: Optional[np.ndarray] = None, offset: Optional[np.ndarray] = None, ) -> None: tasks.FollowTarget.__init__( self, name=name, target_prim_path=target_prim_path, target_name=target_name, target_position=target_position, target_orientation=target_orientation, offset=offset, ) carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._robot_path = self._server_root + "/Projects/simple_robo_arm.usd" self._joints_default_positions = np.zeros(7) return def set_robot(self) -> SingleManipulator: # add robot to the scene add_reference_to_stage(usd_path=self._robot_path, prim_path="/World/simple_robot") # #define the gripper # gripper = ParallelGripper( # #We chose the following values while inspecting the articulation # end_effector_prim_path="/World/mirobot/onrobot_rg6_base_link", # joint_prim_names=["finger_joint", "right_outer_knuckle_joint"], # joint_opened_positions=np.array([0, 0]), # joint_closed_positions=np.array([0.628, -0.628]), # action_deltas=np.array([-0.628, 0.628]), # ) # define the manipulator manipulator = SingleManipulator( prim_path="/World/simple_robot", name="simple_robot", end_effector_prim_name="link8_1", gripper=None, ) manipulator.set_joints_default_state(positions=self._joints_default_positions) return manipulator class FollowTargetExample(BaseSample): def __init__(self) -> None: super().__init__() self._articulation_controller = None self._my_controller = None # simulation step counter self._sim_step = 0 return def setup_scene(self): self._world = self.get_world() self._world.scene.add_default_ground_plane() # We add the task to the world here my_task = FollowTarget( name="simple_robot_follow_target", target_position=np.array([0.15, 0, 0.15]), target_orientation=np.array([1, 0, 0, 0]), ) self._world.add_task(my_task) return async def setup_post_load(self): self._world = self.get_world() self._task_params = self._world.get_task("simple_robot_follow_target").get_params() self._target_name = self._task_params["target_name"]["value"] self._my_mirobot = self._world.scene.get_object(self._task_params["robot_name"]["value"]) # # IK controller # self._my_controller = 
KinematicsSolver(self._my_mirobot) # RMPFlow controller self._my_controller = RMPFlowController(name="target_follower_controller", robot_articulation=self._my_mirobot) self._articulation_controller = self._my_mirobot.get_articulation_controller() self._world.add_physics_callback("sim_step", callback_fn=self.sim_step_cb) return async def setup_post_reset(self): self._my_controller.reset() await self._world.play_async() return def sim_step_cb(self, step_size): observations = self._world.get_observations() pos = observations[self._target_name]["position"] ori = observations[self._target_name]["orientation"] # # IK controller # actions, succ = self._my_controller.compute_inverse_kinematics( # target_position=pos # ) # if succ: # self._articulation_controller.apply_action(actions) # else: # carb.log_warn("IK did not converge to a solution. No action is being taken.") # RMPFlow controller actions = self._my_controller.forward( target_end_effector_position=pos, target_end_effector_orientation=ori, ) self._articulation_controller.apply_action(actions) return
5,527
Python
35.130719
119
0.641578
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/SimpleRobotFollowTarget/rmpflow/robot_descriptor.yaml
api_version: 1.0

cspace:
    - link1_to_base_link
    - link2_to_link1
    - link3_to_link2
    - link4_to_link3
    - link5_to_link4
    - link6_to_link5
    - link7_to_link6

root_link: world

default_q: [
    0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00
]

cspace_to_urdf_rules: []

composite_task_spaces: []
303
YAML
19.266665
44
0.60396
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipURGripper/ur10_gripper.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.types import ArticulationAction from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.manipulators import SingleManipulator from omni.isaac.core.objects import DynamicCuboid from omni.isaac.manipulators.grippers import SurfaceGripper from omni.isaac.universal_robots.controllers.pick_place_controller import PickPlaceController import numpy as np import carb class UR10Gripper(BaseSample): def __init__(self) -> None: super().__init__() self._my_controller = None self._articulation_controller = None # simulation step counter self._sim_step = 0 return def setup_robot(self): self._world = self.get_world() assets_root_path = get_assets_root_path() asset_path = assets_root_path + "/Isaac/Robots/UR10/ur10.usd" add_reference_to_stage(usd_path=asset_path, prim_path="/World/UR10") gripper_usd = assets_root_path + "/Isaac/Robots/UR10/Props/short_gripper.usd" # add_reference_to_stage(usd_path=gripper_usd, prim_path="/World/UR10/ee_link") # gripper = SurfaceGripper( # end_effector_prim_path="/World/UR10/ee_link", # translate=0.1611, # # direction="x", # direction="z", # ) # self._ur10 = self._world.scene.add( # SingleManipulator( # prim_path="/World/UR10", # name="my_ur10", # end_effector_prim_name="ee_link", # # gripper=gripper # gripper=None # ) # ) # self._ur10.set_joints_default_state( # positions=np.array([-np.pi/2, -np.pi/2, -np.pi/2, -np.pi/2, np.pi/2, 0]) # ) self._cube = self._world.scene.add( DynamicCuboid( name="cube", position=np.array([0.3, 0.3, 0.3]), prim_path="/World/Cube", scale=np.array([0.0515, 0.0515, 0.0515]), size=1.0, color=np.array([0, 0, 1]), ) ) def setup_scene(self): self._world = self.get_world() self._world.scene.add_default_ground_plane() self.setup_robot() return async def setup_post_load(self): self._world = self.get_world() # self._my_controller = PickPlaceController( # name="pick_place_controller", # gripper=self._ur10.gripper, # robot_articulation=self._ur10 # ) # self._articulation_controller = self._ur10.get_articulation_controller() # self._world.add_physics_callback("sending_actions", callback_fn=self.send_robot_actions) return def send_robot_actions(self, step_size): # self._sim_step += 1 # observations = self._world.get_observations() # actions = self._my_controller.forward( # picking_position=self._cube.get_local_pose()[0], # placing_position=np.array([0.7, 0.7, 0.0515 / 2.0]), # current_joint_positions=self._ur10.get_joint_positions(), # # end_effector_offset=np.array([0, 0, 0.02]), # end_effector_offset=np.array([0, 0, 0.03]), # ) # if self._my_controller.is_done(): # print("done picking and placing") # self._articulation_controller.apply_action(actions) return
4,054
Python
33.07563
98
0.594475
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ETRIusbA/pick_and_place_twice.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.franka import Franka from omni.isaac.core.objects import DynamicCuboid from omni.isaac.franka.controllers import PickPlaceController import numpy as np class HelloManip(BaseSample): def __init__(self) -> None: super().__init__() return def setup_scene(self): world = self.get_world() world.scene.add_default_ground_plane() franka = world.scene.add(Franka(prim_path="/World/Fancy_Franka", name="fancy_franka")) world.scene.add( DynamicCuboid( prim_path="/World/random_cube1", name="fancy_cube1", position=np.array([0.3, 0.3, 0.3]), scale=np.array([0.0515, 0.0515, 0.0515]), color=np.array([0, 0, 1.0]), ) ) world.scene.add( DynamicCuboid( prim_path="/World/random_cube2", name="fancy_cube2", position=np.array([0.5, 0.0, 0.3]), scale=np.array([0.0515, 0.0515, 0.0515]), color=np.array([0, 0, 1.0]), ) ) self._event = 0 return async def setup_post_load(self): self._world = self.get_world() self._franka = self._world.scene.get_object("fancy_franka") self._fancy_cube1 = self._world.scene.get_object("fancy_cube1") self._fancy_cube2 = self._world.scene.get_object("fancy_cube2") # Initialize a pick and place controller self._controller = PickPlaceController( name="pick_place_controller", gripper=self._franka.gripper, robot_articulation=self._franka, ) self._world.add_physics_callback("sim_step", callback_fn=self.physics_step) # World has pause, stop, play..etc # Note: if async version exists, use it in any async function is this workflow self._franka.gripper.set_joint_positions(self._franka.gripper.joint_opened_positions) await self._world.play_async() return # This function is called after Reset button is pressed # Resetting anything in the world should happen here async def setup_post_reset(self): self._controller.reset() self._franka.gripper.set_joint_positions(self._franka.gripper.joint_opened_positions) await self._world.play_async() return def physics_step(self, step_size): cube_position1, _ = self._fancy_cube1.get_world_pose() cube_position2, _ = self._fancy_cube2.get_world_pose() goal_position1 = np.array([-0.3, -0.3, 0.0515 / 2.0]) goal_position2 = np.array([-0.2, -0.3, 0.0515 / 2.0]) current_joint_positions = self._franka.get_joint_positions() if self._event == 0: actions = self._controller.forward( picking_position=cube_position1, placing_position=goal_position1, current_joint_positions=current_joint_positions, ) self._franka.apply_action(actions) elif self._event == 1: actions = self._controller.forward( picking_position=cube_position2, placing_position=goal_position2, current_joint_positions=current_joint_positions, ) self._franka.apply_action(actions) # Only for the pick and place controller, indicating if the state # machine reached the final state. if self._controller.is_done(): self._event += 1 self._controller.reset() return
4,083
Python
37.168224
94
0.607886
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ETRIusbA/franka_usb_insertion.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.franka.controllers import PickPlaceController from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.tasks import BaseTask from omni.isaac.franka import Franka from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.isaac.core.prims.geometry_prim import GeometryPrim from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.franka import Franka from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.rotations import euler_angles_to_quat from pxr import UsdGeom, Gf, UsdPhysics, Sdf, Gf, Tf, UsdLux from omni.isaac.core import SimulationContext from omni.physx.scripts import utils from pxr import PhysxSchema import numpy as np import carb def createSdfResolution(stage, primPath, kinematic=False): bodyPrim = stage.GetPrimAtPath(primPath) meshCollision = PhysxSchema.PhysxSDFMeshCollisionAPI.Apply(bodyPrim) meshCollision.CreateSdfResolutionAttr().Set(350) def createRigidBody(stage, primPath, kinematic=False): bodyPrim = stage.GetPrimAtPath(primPath) rigid_api = UsdPhysics.RigidBodyAPI.Apply(bodyPrim) rigid_api.CreateRigidBodyEnabledAttr(True) def addObjectsGeom(scene, name, scale, ini_pos, collision=None, mass=None, orientation=None): scene.add(GeometryPrim(prim_path=f"/World/{name}", name=f"{name}_ref_geom", collision=True)) geom = scene.get_object(f"{name}_ref_geom") if orientation is None: # Usually - (x, y, z, w) # But in Isaac Sim - (w, x, y, z) orientation = np.array([1.0, 0.0, 0.0, 0.0]) geom.set_local_scale(scale) geom.set_world_pose(position=ini_pos) geom.set_default_state(position=ini_pos, orientation=orientation) geom.set_collision_enabled(False) if collision is not None: geom.set_collision_enabled(True) geom.set_collision_approximation(collision) if mass is not None: massAPI = UsdPhysics.MassAPI.Apply(geom.prim.GetPrim()) massAPI.CreateMassAttr().Set(mass) return geom class FrankaUSBInsert(BaseSample): def __init__(self) -> None: super().__init__() # Nucleus Path Configuration carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) # self.USB_MALE_PATH = self._server_root + "/Projects/ETRI/USB_A/USB_A_2_Male_modi1_3.usd" self.USB_MALE_PATH = self._server_root + "/Projects/ETRI/USB_A/USB_A_2_Male_modi1_3_pure.usd" self.USB_FEMALE_PATH = self._server_root + "/Projects/ETRI/USB_A/USB_A_2_Female_modi1_7.usd" return def setup_simulation(self): self._scene = PhysicsContext() # self._scene.set_solver_type("TGS") self._scene.set_broadphase_type("GPU") self._scene.enable_gpu_dynamics(flag=True) # self._scene.set_friction_offset_threshold(0.01) # self._scene.set_friction_correlation_distance(0.0005) # self._scene.set_gpu_total_aggregate_pairs_capacity(10 * 1024) # self._scene.set_gpu_found_lost_pairs_capacity(10 * 1024) # self._scene.set_gpu_heap_capacity(64 * 1024 * 1024) # 
self._scene.set_gpu_found_lost_aggregate_pairs_capacity(10 * 1024) # # added because of new errors regarding collisionstacksize # physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(get_prim_at_path("/physicsScene")) # physxSceneAPI.CreateGpuCollisionStackSizeAttr().Set(76000000) # or whatever min is needed def add_light(self, stage): sphereLight = UsdLux.SphereLight.Define(stage, Sdf.Path("/World/SphereLight")) sphereLight.CreateRadiusAttr(0.2) sphereLight.CreateIntensityAttr(30000) sphereLight.AddTranslateOp().Set(Gf.Vec3f(0.0, 0.0, 2.0)) def setup_scene(self): self._world = self.get_world() # self._world.scene.add_default_ground_plane() self._world.scene.add_ground_plane() self.setup_simulation() self.add_light(self._world.scene.stage) # USB Male add_reference_to_stage(usd_path=self.USB_MALE_PATH, prim_path=f"/World/usb_male") createSdfResolution(self._world.scene.stage, "/World/usb_male") createRigidBody(self._world.scene.stage, "/World/usb_male") self._usb_male_geom = addObjectsGeom( self._world.scene, "usb_male", scale=np.array([0.02, 0.02, 0.02]), ini_pos=np.array([0.5, 0.2, -0.01]), # ini_pos=np.array([0.50037, -0.2, 0.06578]), collision="sdf", mass=None, orientation=None ) # USB FeMale add_reference_to_stage(usd_path=self.USB_FEMALE_PATH, prim_path=f"/World/usb_female") self._usb_female_geom = addObjectsGeom( self._world.scene, "usb_female", scale=np.array([0.02, 0.02, 0.02]), ini_pos=np.array([0.5, -0.2, -0.01]), collision=None, mass=None, orientation=None ) # Add Franka self._franka = self._world.scene.add( Franka( prim_path="/World/franka", name="franka", position=np.array([0.0, 0.0, 0.0]), ) ) self.simulation_context = SimulationContext() return async def setup_post_load(self): self._franka = self._world.scene.get_object("franka") # Initialize a pick and place controller self._controller = PickPlaceController( name="pick_place_controller", gripper=self._franka.gripper, robot_articulation=self._franka, ) # World has pause, stop, play..etc # Note: if async version exists, use it in any async function is this workflow self._franka.gripper.set_joint_positions(self._franka.gripper.joint_opened_positions) self._world.add_physics_callback("sim_step", callback_fn=self.physics_step) # Auto play # await self._world.play_async() return def physics_step(self, step_size): usb_male_position, _ = self._usb_male_geom.get_world_pose() usb_female_position, _ = self._usb_female_geom.get_world_pose() goal_position = usb_female_position + np.array([0.0008, 0.0, 0.06578]) # ini_pos=np.array([0.50037, -0.2, 0.06578]), current_joint_positions = self._franka.get_joint_positions() actions = self._controller.forward( picking_position=usb_male_position, placing_position=goal_position, end_effector_orientation=euler_angles_to_quat( np.array([0, np.pi*3/4, 0]) ), end_effector_offset=np.array([0, 0, 0.035]), current_joint_positions=current_joint_positions, ) self._franka.apply_action(actions) # Only for the pick and place controller, indicating if the state # machine reached the final state. if self._controller.is_done(): self._world.pause() return async def setup_pre_reset(self): self._save_count = 0 self._event = 0 return async def setup_post_reset(self): await self._world.play_async() return def world_cleanup(self): return
7,980
Python
37.186603
101
0.650125
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ETRIusbA/hello_manip_basic.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.franka import Franka from omni.isaac.core.objects import DynamicCuboid from omni.isaac.franka.controllers import PickPlaceController import numpy as np from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.utils.prims import define_prim, get_prim_at_path from omni.isaac.core.utils.rotations import euler_angles_to_quat from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.prims.geometry_prim import GeometryPrim from omni.isaac.core.prims.xform_prim import XFormPrim from omni.isaac.core.prims.rigid_prim import RigidPrim from omni.isaac.universal_robots import UR10 from omni.isaac.core.materials.physics_material import PhysicsMaterial from omni.isaac.core.physics_context.physics_context import PhysicsContext from pxr import Gf, PhysxSchema, Usd, UsdPhysics, UsdShade import carb class HelloManip(BaseSample): def __init__(self) -> None: super().__init__() # some global sim options: self._time_steps_per_second = 240 # 4.167ms aprx self._fsm_update_rate = 60 self._solverPositionIterations = 4 self._solverVelocityIterations = 1 self._solver_type = "TGS" self._ik_damping = 0.1 self._num_nuts = 2 self._num_bins = 2 # Asset Path from Nucleus # self._cube_asset_path = get_assets_root_path() + "/Isaac/Props/Blocks/nvidia_cube.usd" self._bin_asset_path = get_assets_root_path() + "/Isaac/Props/KLT_Bin/small_KLT.usd" self._nut_asset_path = get_assets_root_path() + "/Isaac/Samples/Examples/FrankaNutBolt/SubUSDs/Nut/M20_Nut_Tight_R256_Franka_SI.usd" self._bin_position = np.array([ [ 0.35, -0.25, 0.1], [ 0.35, 0.25, 0.1], ]) self._bins = [] self._bins_offset = 0.1 self._nuts_position = np.array([ [0.35, -0.22, 0.2], [0.30, -0.28, 0.2], ]) # self._nut_position_x = np.array([0.28, 0.4]) # self._nut_position_y = np.array([-0.35, -0.15]) # self._nut_position_z = 0.2 self._nuts = [] self._nuts_offset = 0.005 return def setup_scene(self): world = self.get_world() world.scene.add_default_ground_plane() prim = get_prim_at_path("/World/defaultGroundPlane") self._setup_simulation() franka = world.scene.add(Franka(prim_path="/World/Fancy_Franka", name="fancy_franka")) # ur10 = world.scene.add(UR10(prim_path="/World/UR10", name="UR10")) # RigidPrim Ref. # https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.core/docs/index.html#omni.isaac.core.prims.RigidPrim # GeometryPrim Ref. 
# https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.core/docs/index.html?highlight=geometryprim#omni.isaac.core.prims.GeometryPrim for bins in range(self._num_bins): add_reference_to_stage( usd_path=self._bin_asset_path, prim_path=f"/World/bin{bins}", ) _bin = world.scene.add( RigidPrim( prim_path=f"/World/bin{bins}", name=f"bin{bins}", position=self._bin_position[bins] / get_stage_units(), orientation=euler_angles_to_quat(np.array([np.pi, 0., 0.])), mass=0.1, # kg ) ) self._bins.append(_bin) for nut in range(self._num_nuts): # nut_position = np.array([ # np.random.randint(*(self._nut_position_x*100)) / 100, # np.random.randint(*(self._nut_position_y*100)) / 100, # self._nut_position_z, # ]) add_reference_to_stage( usd_path=self._nut_asset_path, prim_path=f"/World/nut{nut}", ) nut = world.scene.add( GeometryPrim( prim_path=f"/World/nut{nut}", name=f"nut{nut}_geom", position=self._nuts_position[nut] / get_stage_units(), collision=True, # mass=0.1, # kg ) ) self._nuts.append(nut) return def _setup_simulation(self): self._scene = PhysicsContext() self._scene.set_solver_type(self._solver_type) self._scene.set_broadphase_type("GPU") self._scene.enable_gpu_dynamics(flag=True) self._scene.set_friction_offset_threshold(0.01) self._scene.set_friction_correlation_distance(0.0005) self._scene.set_gpu_total_aggregate_pairs_capacity(10 * 1024) self._scene.set_gpu_found_lost_pairs_capacity(10 * 1024) self._scene.set_gpu_heap_capacity(64 * 1024 * 1024) self._scene.set_gpu_found_lost_aggregate_pairs_capacity(10 * 1024) # added because of new errors regarding collisionstacksize physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(get_prim_at_path("/physicsScene")) physxSceneAPI.CreateGpuCollisionStackSizeAttr().Set(76000000) # or whatever min is needed async def setup_post_load(self): self._world = self.get_world() self._franka = self._world.scene.get_object("fancy_franka") # Initialize a pick and place controller self._controller = PickPlaceController( name="pick_place_controller", gripper=self._franka.gripper, robot_articulation=self._franka, ) self._world.add_physics_callback("sim_step", callback_fn=self.physics_step) # World has pause, stop, play..etc # Note: if async version exists, use it in any async function is this workflow self._franka.gripper.set_joint_positions(self._franka.gripper.joint_opened_positions) await self._world.play_async() return # This function is called after Reset button is pressed # Resetting anything in the world should happen here async def setup_post_reset(self): self._controller.reset() self._franka.gripper.set_joint_positions(self._franka.gripper.joint_opened_positions) await self._world.play_async() return def physics_step(self, step_size): target_position, _ = self._nuts[0].get_world_pose() target_position[2] += self._nuts_offset goal_position, _ = self._bins[1].get_world_pose() goal_position[2] += self._bins_offset # print(goal_position) current_joint_positions = self._franka.get_joint_positions() actions = self._controller.forward( picking_position=target_position, placing_position=goal_position, current_joint_positions=current_joint_positions, ) self._franka.apply_action(actions) # Only for the pick and place controller, indicating if the state # machine reached the final state. if self._controller.is_done(): self._world.pause() return
7,645
Python
39.670213
163
0.616089
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ETRIUR102F85/ur10_basic.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.franka.controllers import PickPlaceController from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.tasks import BaseTask from omni.isaac.franka import Franka from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.isaac.core.prims.geometry_prim import GeometryPrim from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.franka import Franka from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.rotations import euler_angles_to_quat from pxr import UsdGeom, Gf, UsdPhysics, Sdf, Gf, Tf, UsdLux from omni.isaac.core import SimulationContext from omni.physx.scripts import utils from pxr import PhysxSchema import numpy as np import carb from omni.isaac.universal_robots import UR10 from omni.isaac.universal_robots.controllers.rmpflow_controller import RMPFlowController from .custom_ur10 import UR10 as CustomUR10 class UR102F85(BaseSample): def __init__(self) -> None: super().__init__() # Nucleus Path Configuration carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._root_path = get_assets_root_path() # self.GRIPPER_PATH = self._server_root + "/Projects/ETRI/Gripper/2f85_fixed.usd" self.GRIPPER_PATH = self._root_path + "/Isaac/Robots/Robotiq/2F-85/2f85_instanceable.usd" self.UR_PATH = self._server_root + "/Projects/ETRI/Gripper/ur10_2f85_gripper.usd" self._sim_count = 0 self._gripper_opened = False return def setup_scene(self): self._world = self.get_world() self._world.scene.add_default_ground_plane() # Add UR10 robot self._ur10 = self._world.scene.add( CustomUR10( prim_path="/World/UR10", name="UR10", usd_path=self.UR_PATH, attach_gripper=True, # Temp: in practice the gripper is already embedded in the UR_PATH asset gripper_usd=self.GRIPPER_PATH ) ) self.simulation_context = SimulationContext() return async def setup_post_load(self): self._my_ur10 = self._world.scene.get_object("UR10") self._my_gripper = self._my_ur10.gripper # RMPFlow controller self._controller = RMPFlowController( name="target_follower_controller", robot_articulation=self._my_ur10 ) self._articulation_controller = self._my_ur10.get_articulation_controller() self._world.add_physics_callback("sim_step", callback_fn=self.physics_step) return def physics_step(self, step_size): self._sim_count += 1 if self._sim_count < 200: # RMPFlow controller actions = self._controller.forward( target_end_effector_position=np.array([0.4, 0, 0.5]), # target_end_effector_orientation=ee_orientation, # w x y z => x y z w # 0 0 1 0 => 0 1 0 0 # 0 0 1 0 => 0 1 0 0 target_end_effector_orientation=np.array([1, 0, 0, 0]), ) self._articulation_controller.apply_action(actions) else: if self._sim_count % 100 == 0: # Gripper control if self._gripper_opened: self._gripper_opened = False self._my_gripper.close() else: self._gripper_opened = True self._my_gripper.open() return async def
setup_pre_reset(self): # reset the counters that physics_step actually uses self._sim_count = 0 self._gripper_opened = False return async def setup_post_reset(self): await self._world.play_async() return def world_cleanup(self): return
4,497
Python
33.868217
101
0.637314
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ETRIUR102F85/custom_ur10.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from typing import Optional import carb import numpy as np from omni.isaac.core.prims.rigid_prim import RigidPrim from omni.isaac.core.robots.robot import Robot from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.manipulators.grippers.surface_gripper import SurfaceGripper from omni.isaac.manipulators.grippers.parallel_gripper import ParallelGripper class UR10(Robot): """[summary] Args: prim_path (str): [description] name (str, optional): [description]. Defaults to "ur10_robot". usd_path (Optional[str], optional): [description]. Defaults to None. position (Optional[np.ndarray], optional): [description]. Defaults to None. orientation (Optional[np.ndarray], optional): [description]. Defaults to None. end_effector_prim_name (Optional[str], optional): [description]. Defaults to None. attach_gripper (bool, optional): [description]. Defaults to False. gripper_usd (Optional[str], optional): [description]. Defaults to "default". Raises: NotImplementedError: [description] """ def __init__( self, prim_path: str, name: str = "ur10_robot", usd_path: Optional[str] = None, position: Optional[np.ndarray] = None, orientation: Optional[np.ndarray] = None, end_effector_prim_name: Optional[str] = None, attach_gripper: bool = False, gripper_usd: Optional[str] = "default", ) -> None: prim = get_prim_at_path(prim_path) self._end_effector = None self._gripper = None self._end_effector_prim_name = end_effector_prim_name if not prim.IsValid(): if usd_path: add_reference_to_stage(usd_path=usd_path, prim_path=prim_path) else: assets_root_path = get_assets_root_path() if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") return usd_path = assets_root_path + "/Isaac/Robots/UR10/ur10.usd" add_reference_to_stage(usd_path=usd_path, prim_path=prim_path) if self._end_effector_prim_name is None: self._end_effector_prim_path = prim_path + "/ee_link" else: self._end_effector_prim_path = prim_path + "/" + end_effector_prim_name else: # TODO: change this if self._end_effector_prim_name is None: self._end_effector_prim_path = prim_path + "/ee_link" else: self._end_effector_prim_path = prim_path + "/" + end_effector_prim_name super().__init__( prim_path=prim_path, name=name, position=position, orientation=orientation, articulation_controller=None ) self._gripper_usd = gripper_usd if attach_gripper: if gripper_usd == "default": assets_root_path = get_assets_root_path() if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") return gripper_usd = assets_root_path + "/Isaac/Robots/UR10/Props/short_gripper.usd" add_reference_to_stage(usd_path=gripper_usd, prim_path=self._end_effector_prim_path) self._gripper = SurfaceGripper( end_effector_prim_path=self._end_effector_prim_path, translate=0.1611, direction="x" ) elif gripper_usd is None: carb.log_warn("Not adding a gripper usd, the gripper already exists in the ur10 asset") self._gripper = SurfaceGripper( end_effector_prim_path=self._end_effector_prim_path, 
translate=0.1611, direction="x" ) else: # add_reference_to_stage(usd_path=gripper_usd, prim_path=self._end_effector_prim_path) gripper_dof_names = ["left_inner_knuckle_joint", "right_inner_knuckle_joint"] gripper_open_position = np.array([0.0, 0.0]) gripper_closed_position = np.array([50.0, 50.0]) deltas = np.array([50.0, 50.0]) self._gripper = ParallelGripper( end_effector_prim_path=self._end_effector_prim_path, joint_prim_names=gripper_dof_names, joint_opened_positions=gripper_open_position, joint_closed_positions=gripper_closed_position, action_deltas=deltas, ) self._attach_gripper = attach_gripper return @property def attach_gripper(self) -> bool: """[summary] Returns: bool: [description] """ return self._attach_gripper @property def end_effector(self) -> RigidPrim: """[summary] Returns: RigidPrim: [description] """ return self._end_effector @property def gripper(self) -> SurfaceGripper: """[summary] Returns: SurfaceGripper: [description] """ return self._gripper def initialize(self, physics_sim_view=None) -> None: """[summary]""" super().initialize(physics_sim_view) # if self._attach_gripper: # self._gripper.initialize(physics_sim_view=physics_sim_view, articulation_num_dofs=self.num_dof) self._end_effector = RigidPrim(prim_path=self._end_effector_prim_path, name=self.name + "_end_effector") self.disable_gravity() self._end_effector.initialize(physics_sim_view) self._gripper.initialize( physics_sim_view=physics_sim_view, articulation_apply_action_func=self.apply_action, get_joint_positions_func=self.get_joint_positions, set_joint_positions_func=self.set_joint_positions, dof_names=self.dof_names, ) return def post_reset(self) -> None: """[summary]""" Robot.post_reset(self) # self._end_effector.post_reset() self._gripper.post_reset() return
6,705
Python
40.9125
116
0.604027
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ETRIFrankaGripper/custom_franka.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from typing import List, Optional import carb import numpy as np from omni.isaac.core.prims.rigid_prim import RigidPrim from omni.isaac.core.robots.robot import Robot from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.manipulators.grippers.parallel_gripper import ParallelGripper class Franka(Robot): """[summary] Args: prim_path (str): [description] name (str, optional): [description]. Defaults to "franka_robot". usd_path (Optional[str], optional): [description]. Defaults to None. position (Optional[np.ndarray], optional): [description]. Defaults to None. orientation (Optional[np.ndarray], optional): [description]. Defaults to None. end_effector_prim_name (Optional[str], optional): [description]. Defaults to None. gripper_dof_names (Optional[List[str]], optional): [description]. Defaults to None. gripper_open_position (Optional[np.ndarray], optional): [description]. Defaults to None. gripper_closed_position (Optional[np.ndarray], optional): [description]. Defaults to None. """ def __init__( self, prim_path: str, name: str = "franka_robot", usd_path: Optional[str] = None, position: Optional[np.ndarray] = None, orientation: Optional[np.ndarray] = None, end_effector_prim_name: Optional[str] = None, gripper_dof_names: Optional[List[str]] = None, gripper_open_position: Optional[np.ndarray] = None, gripper_closed_position: Optional[np.ndarray] = None, deltas: Optional[np.ndarray] = None, ) -> None: prim = get_prim_at_path(prim_path) self._end_effector = None self._gripper = None self._end_effector_prim_name = end_effector_prim_name if not prim.IsValid(): if usd_path: add_reference_to_stage(usd_path=usd_path, prim_path=prim_path) else: assets_root_path = get_assets_root_path() if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") usd_path = assets_root_path + "/Isaac/Robots/Franka/franka.usd" add_reference_to_stage(usd_path=usd_path, prim_path=prim_path) if self._end_effector_prim_name is None: self._end_effector_prim_path = prim_path + "/panda_leftfinger" else: self._end_effector_prim_path = prim_path + "/" + end_effector_prim_name if gripper_dof_names is None: gripper_dof_names = ["panda_finger_joint1", "panda_finger_joint2"] if gripper_open_position is None: gripper_open_position = np.array([0.05, 0.05]) / get_stage_units() if gripper_closed_position is None: gripper_closed_position = np.array([0.0, 0.0]) else: if self._end_effector_prim_name is None: self._end_effector_prim_path = prim_path + "/panda_leftfinger" else: self._end_effector_prim_path = prim_path + "/" + end_effector_prim_name if gripper_dof_names is None: gripper_dof_names = ["panda_finger_joint1", "panda_finger_joint2"] if gripper_open_position is None: gripper_open_position = np.array([0.05, 0.05]) / get_stage_units() if gripper_closed_position is None: gripper_closed_position = np.array([0.0, 0.0]) super().__init__( prim_path=prim_path, name=name, position=position, orientation=orientation, 
articulation_controller=None ) print(f"self._end_effector_prim_path: {self._end_effector_prim_path}") if gripper_dof_names is not None: if deltas is None: deltas = np.array([0.05, 0.05]) / get_stage_units() self._gripper = ParallelGripper( end_effector_prim_path=self._end_effector_prim_path, joint_prim_names=gripper_dof_names, joint_opened_positions=gripper_open_position, joint_closed_positions=gripper_closed_position, action_deltas=deltas, ) return @property def end_effector(self) -> RigidPrim: """[summary] Returns: RigidPrim: [description] """ return self._end_effector @property def gripper(self) -> ParallelGripper: """[summary] Returns: ParallelGripper: [description] """ return self._gripper def initialize(self, physics_sim_view=None) -> None: """[summary]""" super().initialize(physics_sim_view) self._end_effector = RigidPrim(prim_path=self._end_effector_prim_path, name=self.name + "_end_effector") self._end_effector.initialize(physics_sim_view) self._gripper.initialize( physics_sim_view=physics_sim_view, articulation_apply_action_func=self.apply_action, get_joint_positions_func=self.get_joint_positions, set_joint_positions_func=self.set_joint_positions, dof_names=self.dof_names, ) return def post_reset(self) -> None: """[summary]""" super().post_reset() self._gripper.post_reset() self._articulation_controller.switch_dof_control_mode( dof_index=self.gripper.joint_dof_indicies[0], mode="position" ) self._articulation_controller.switch_dof_control_mode( dof_index=self.gripper.joint_dof_indicies[1], mode="position" ) return
6,227
Python
42.552447
116
0.61956
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ETRIFrankaGripper/franka_gripper.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core import SimulationContext import carb from .custom_franka import Franka as CustomFranka class FrankaGripper(BaseSample): def __init__(self) -> None: super().__init__() # Nucleus Path Configuration carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._sim_count = 0 self._gripper_opened = False return def setup_scene(self): self._world = self.get_world() self._world.scene.add_default_ground_plane() # Add Franka robot prim_path = "/World/Fancy_Franka" self._franka = self._world.scene.add( CustomFranka( prim_path=prim_path, name="fancy_franka" ) ) self.simulation_context = SimulationContext() return async def setup_post_load(self): self._my_franka = self._world.scene.get_object("fancy_franka") self._my_gripper = self._my_franka.gripper self._world.add_physics_callback("sim_step", callback_fn=self.physics_step) return def physics_step(self, step_size): self._sim_count += 1 if self._sim_count % 100 == 0: if self._gripper_opened: self._gripper_opened = False self._my_gripper.close() else: self._gripper_opened = True self._my_gripper.open() self._sim_count = 0 return async def setup_pre_reset(self): self._save_count = 0 self._event = 0 return async def setup_post_reset(self): await self._world.play_async() return def world_cleanup(self): return
2,451
Python
27.511628
101
0.628315
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/HelloWorld/hello_world.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample import numpy as np # Note: checkout the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html from omni.isaac.core.objects import DynamicCuboid class HelloWorld(BaseSample): def __init__(self) -> None: super().__init__() return def setup_scene(self): world = self.get_world() world.scene.add_default_ground_plane() fancy_cube = world.scene.add( DynamicCuboid( prim_path="/World/random_cube", # The prim path of the cube in the USD stage name="fancy_cube", # The unique name used to retrieve the object from the scene later on position=np.array([0, 0, 1.0]), # Using the current stage units which is in meters by default. scale=np.array([0.5015, 0.5015, 0.5015]), # most arguments accept mainly numpy arrays. color=np.array([0, 0, 1.0]), # RGB channels, going from 0-1 )) return # Here we assign the class's variables # this function is called after load button is pressed # regardless starting from an empty stage or not # this is called after setup_scene and after # one physics time step to propagate appropriate # physics handles which are needed to retrieve # many physical properties of the different objects async def setup_post_load(self): self._world = self.get_world() self._cube = self._world.scene.get_object("fancy_cube") self._world.add_physics_callback("sim_step", callback_fn=self.print_cube_info) #callback names have to be unique return # here we define the physics callback to be called before each physics step, all physics callbacks must take # step_size as an argument def print_cube_info(self, step_size): position, orientation = self._cube.get_world_pose() linear_velocity = self._cube.get_linear_velocity() # will be shown on terminal print("Cube position is : " + str(position)) print("Cube's orientation is : " + str(orientation)) print("Cube's linear velocity is : " + str(linear_velocity)) # async def setup_pre_reset(self): # return # async def setup_post_reset(self): # return # def world_cleanup(self): # return
2,802
Python
40.83582
120
0.673091
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorSpamRandomPose/replicator_basic.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.stage import open_stage import omni.replicator.core as rep import carb.settings import numpy as np from os.path import expanduser import datetime now = datetime.datetime.now() class SpamRandomPose(BaseSample): def __init__(self) -> None: super().__init__() self._isaac_assets_path = get_assets_root_path() self._nucleus_server_path = "omniverse://localhost/NVIDIA/" self.SPAM_URL = self._isaac_assets_path + "/Isaac/Props/YCB/Axis_Aligned/010_potted_meat_can.usd" # Enable scripts carb.settings.get_settings().set_bool("/app/omni.graph.scriptnode/opt_in", True) # Disable capture on play and async rendering carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False) carb.settings.get_settings().set("/omni/replicator/asyncRendering", False) carb.settings.get_settings().set("/app/asyncRendering", False) now_str = now.strftime("%Y-%m-%d_%H:%M:%S") self._out_dir = str(expanduser("~") + "/Documents/spam_data_" + now_str) self._sim_step = 0 self.spam_can = None return def random_props(self): with self.spam_can: rep.modify.pose( position=rep.distribution.uniform((-0.1, -0.1, 0.5), (0.1, 0.1, 0.5)), rotation=rep.distribution.uniform((-180,-180, -180), (180, 180, 180)), scale = rep.distribution.uniform((0.8), (1.2)), ) def random_sphere_lights(self): with self.rp_light: rep.modify.pose( position=rep.distribution.uniform((-0.5, -0.5, 1.0), (0.5, 0.5, 1.0)), ) def setup_scene(self): world = self.get_world() world.scene.add_default_ground_plane() self.cam = rep.create.camera(position=(0, 0, 2), look_at=(0, 0, 0)) self.rp = rep.create.render_product(self.cam, resolution=(1024, 1024)) rep.randomizer.register(self.random_props) rep.randomizer.register(self.random_sphere_lights) self.spam_can = rep.create.from_usd(self.SPAM_URL) with self.spam_can: rep.modify.semantics([('class', "spam_can")]) rep.modify.pose( position=rep.distribution.uniform((-0.1, -0.1, 0.5), (0.1, 0.1, 0.5)), rotation=rep.distribution.uniform((-180,-180, -180), (180, 180, 180)), scale = rep.distribution.uniform((0.8), (1.2)), ) self.rp_light = rep.create.light( light_type="sphere", temperature=3000, intensity=5000.0, position=(0.0, 0.0, 1.0), scale=0.5, count=1 ) return async def setup_post_load(self): with rep.trigger.on_frame(num_frames=20): rep.modify.timeline(5, "frame") rep.randomizer.random_props() rep.randomizer.random_sphere_lights() # Create a writer and apply the augmentations to its corresponding annotators self._writer = rep.WriterRegistry.get("BasicWriter") print(f"Writing data to: {self._out_dir}") self._writer.initialize( output_dir=self._out_dir, rgb=True, bounding_box_2d_tight=True, # distance_to_camera=True ) # Attach render product to writer self._writer.attach([self.rp]) return
4,008
Python
35.117117
105
0.609531
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ManipPickandPlace/pick_place_example.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.types import ArticulationAction from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.manipulators.grippers import ParallelGripper from omni.isaac.manipulators import SingleManipulator import omni.isaac.core.tasks as tasks from typing import Optional import numpy as np import carb from .ik_solver import KinematicsSolver from .controllers.rmpflow import RMPFlowController from .controllers.pick_place import PickPlaceController # Inheriting from the base class PickPlace class PickPlace(tasks.PickPlace): def __init__( self, name: str = "denso_pick_place", cube_initial_position: Optional[np.ndarray] = None, cube_initial_orientation: Optional[np.ndarray] = None, target_position: Optional[np.ndarray] = None, offset: Optional[np.ndarray] = None, ) -> None: tasks.PickPlace.__init__( self, name=name, cube_initial_position=cube_initial_position, cube_initial_orientation=cube_initial_orientation, target_position=target_position, cube_size=np.array([0.0515, 0.0515, 0.0515]), offset=offset, ) carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._robot_path = self._server_root + "/Projects/RBROS2/cobotta_pro_900/cobotta_pro_900/cobotta_pro_900.usd" return def set_robot(self) -> SingleManipulator: #TODO: change the asset path here # laptop add_reference_to_stage(usd_path=self._robot_path, prim_path="/World/cobotta") gripper = ParallelGripper( end_effector_prim_path="/World/cobotta/onrobot_rg6_base_link", joint_prim_names=["finger_joint", "right_outer_knuckle_joint"], joint_opened_positions=np.array([0, 0]), joint_closed_positions=np.array([0.628, -0.628]), action_deltas=np.array([-0.2, 0.2]) ) manipulator = SingleManipulator( prim_path="/World/cobotta", name="cobotta_robot", end_effector_prim_name="onrobot_rg6_base_link", gripper=gripper ) joints_default_positions = np.zeros(12) joints_default_positions[7] = 0.628 joints_default_positions[8] = 0.628 manipulator.set_joints_default_state(positions=joints_default_positions) return manipulator class PickandPlaceExample(BaseSample): def __init__(self) -> None: super().__init__() self._articulation_controller = None # simulation step counter self._sim_step = 0 self._target_position = np.array([-0.3, 0.6, 0]) self._target_position[2] = 0.0515 / 2.0 return def setup_scene(self): self._world = self.get_world() self._world.scene.add_default_ground_plane() # We add the task to the world here my_task = PickPlace( name="denso_pick_place", target_position=self._target_position ) self._world.add_task(my_task) return async def setup_post_load(self): self._world = self.get_world() self._my_denso = self._world.scene.get_object("cobotta_robot") self._my_controller = PickPlaceController( name="controller", robot_articulation=self._my_denso, gripper=self._my_denso.gripper ) self._task_params = 
self._world.get_task("denso_pick_place").get_params() self._articulation_controller = self._my_denso.get_articulation_controller() self._world.add_physics_callback("sim_step", callback_fn=self.sim_step_cb) return async def setup_post_reset(self): self._my_controller.reset() await self._world.play_async() return def sim_step_cb(self, step_size): observations = self._world.get_observations() actions = self._my_controller.forward( picking_position=observations[self._task_params["cube_name"]["value"]]["position"], placing_position=observations[self._task_params["cube_name"]["value"]]["target_position"], current_joint_positions=observations[self._task_params["robot_name"]["value"]]["joint_positions"], # This offset needs tuning as well end_effector_offset=np.array([0, 0, 0.25]), ) if self._my_controller.is_done(): print("done picking and placing") self._articulation_controller.apply_action(actions) return
5,318
Python
35.431507
117
0.646108
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/WheeledRobotSummitO3WheelROS2/robotnik_summit.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.wheeled_robots.robots import WheeledRobot from omni.isaac.core.utils.types import ArticulationAction from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from pxr import UsdGeom, Gf, UsdPhysics, Sdf, Gf, Tf, UsdLux from omni.physx.scripts import deformableUtils, physicsUtils from omni.isaac.wheeled_robots.controllers.holonomic_controller import HolonomicController import omni.graph.core as og import numpy as np import usdrt.Sdf import carb import omni class RobotnikSummit(BaseSample): def __init__(self) -> None: super().__init__() carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) # wheel models referenced from : https://git.openlogisticsfoundation.org/silicon-economy/simulation-model/o3dynsimmodel self._robot_path = self._server_root + "/Projects/RBROS2/WheeledRobot/Collected_summit_xl_omni_four/summit_xl_omni_four.usd" self._wheel_radius = np.array([ 0.127, 0.127, 0.127, 0.127 ]) self._wheel_positions = np.array([ [0.229, 0.235, 0.11], [0.229, -0.235, 0.11], [-0.229, 0.235, 0.11], [-0.229, -0.235, 0.11], ]) self._wheel_orientations = np.array([ [0.7071068, 0, 0, 0.7071068], [0.7071068, 0, 0, -0.7071068], [0.7071068, 0, 0, 0.7071068], [0.7071068, 0, 0, -0.7071068], ]) self._mecanum_angles = np.array([ -135.0, -45.0, -45.0, -135.0, ]) self._wheel_axis = np.array([1, 0, 0]) self._up_axis = np.array([0, 0, 1]) self._targetPrim = "/World/Summit/summit_xl_base_link" self._domain_id = 30 return def add_background(self): bg_path = self._server_root + "/Projects/RBROS2/LibraryNoRoof/Library_No_Roof_Collide_Light.usd" add_reference_to_stage( usd_path=bg_path, prim_path=f"/World/Library_No_Roof", ) bg_mesh = UsdGeom.Mesh.Get(self._stage, "/World/Library_No_Roof") # physicsUtils.set_or_add_translate_op(bg_mesh, translate=Gf.Vec3f(0.0, 0.0, 0.0)) # physicsUtils.set_or_add_orient_op(bg_mesh, orient=Gf.Quatf(-0.5, -0.5, -0.5, -0.5)) physicsUtils.set_or_add_scale_op(bg_mesh, scale=Gf.Vec3f(0.01, 0.01, 0.01)) def og_setup(self): try: og.Controller.edit( {"graph_path": "/ROS2HolonomicTwist", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("subscribeTwist", "omni.isaac.ros2_bridge.ROS2SubscribeTwist"), ("scaleToFromStage", "omni.isaac.core_nodes.OgnIsaacScaleToFromStageUnit"), ("breakAngVel", "omni.graph.nodes.BreakVector3"), ("breakLinVel", "omni.graph.nodes.BreakVector3"), ("angvelGain", "omni.graph.nodes.ConstantDouble"), ("angvelMult", "omni.graph.nodes.Multiply"), ("linXGain", "omni.graph.nodes.ConstantDouble"), ("linXMult", "omni.graph.nodes.Multiply"), ("linYGain", "omni.graph.nodes.ConstantDouble"), ("linYMult", "omni.graph.nodes.Multiply"), ("velVec3", 
"omni.graph.nodes.MakeVector3"), ("mecanumAng", "omni.graph.nodes.ConstructArray"), ("holonomicCtrl", "omni.isaac.wheeled_robots.HolonomicController"), ("upAxis", "omni.graph.nodes.ConstantDouble3"), ("wheelAxis", "omni.graph.nodes.ConstantDouble3"), ("wheelOrientation", "omni.graph.nodes.ConstructArray"), ("wheelPosition", "omni.graph.nodes.ConstructArray"), ("wheelRadius", "omni.graph.nodes.ConstructArray"), ("jointNames", "omni.graph.nodes.ConstructArray"), ("articulation", "omni.isaac.core_nodes.IsaacArticulationController"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", self._domain_id), ("subscribeTwist.inputs:topicName", "cmd_vel"), ("angvelGain.inputs:value", -0.514), ("linXGain.inputs:value", 2.325), ("linYGain.inputs:value", 3.0), ("mecanumAng.inputs:arraySize", 4), ("mecanumAng.inputs:arrayType", "double[]"), ("mecanumAng.inputs:input0", self._mecanum_angles[0]), ("mecanumAng.inputs:input1", self._mecanum_angles[1]), ("mecanumAng.inputs:input2", self._mecanum_angles[2]), ("mecanumAng.inputs:input3", self._mecanum_angles[3]), ("holonomicCtrl.inputs:angularGain", 1.0), ("holonomicCtrl.inputs:linearGain", 1.0), ("holonomicCtrl.inputs:maxWheelSpeed", 1200.0), ("upAxis.inputs:value", self._up_axis), ("wheelAxis.inputs:value", self._wheel_axis), ("wheelOrientation.inputs:arraySize", 4), ("wheelOrientation.inputs:arrayType", "double[4][]"), ("wheelOrientation.inputs:input0", self._wheel_orientations[0]), ("wheelOrientation.inputs:input1", self._wheel_orientations[1]), ("wheelOrientation.inputs:input2", self._wheel_orientations[2]), ("wheelOrientation.inputs:input3", self._wheel_orientations[3]), ("wheelPosition.inputs:arraySize", 4), ("wheelPosition.inputs:arrayType", "double[3][]"), ("wheelPosition.inputs:input0", self._wheel_positions[0]), ("wheelPosition.inputs:input1", self._wheel_positions[1]), ("wheelPosition.inputs:input2", self._wheel_positions[2]), ("wheelPosition.inputs:input3", self._wheel_positions[3]), ("wheelRadius.inputs:arraySize", 4), ("wheelRadius.inputs:arrayType", "double[]"), ("wheelRadius.inputs:input0", self._wheel_radius[0]), ("wheelRadius.inputs:input1", self._wheel_radius[1]), ("wheelRadius.inputs:input2", self._wheel_radius[2]), ("wheelRadius.inputs:input3", self._wheel_radius[3]), ("jointNames.inputs:arraySize", 4), ("jointNames.inputs:arrayType", "token[]"), ("jointNames.inputs:input0", "fl_joint"), ("jointNames.inputs:input1", "fr_joint"), ("jointNames.inputs:input2", "rl_joint"), ("jointNames.inputs:input3", "rr_joint"), ("articulation.inputs:targetPrim", [usdrt.Sdf.Path(self._targetPrim)]), ("articulation.inputs:robotPath", self._targetPrim), ("articulation.inputs:usePath", False), ], og.Controller.Keys.CREATE_ATTRIBUTES: [ ("mecanumAng.inputs:input1", "double"), ("mecanumAng.inputs:input2", "double"), ("mecanumAng.inputs:input3", "double"), ("wheelOrientation.inputs:input1", "double[4]"), ("wheelOrientation.inputs:input2", "double[4]"), ("wheelOrientation.inputs:input3", "double[4]"), ("wheelPosition.inputs:input1", "double[3]"), ("wheelPosition.inputs:input2", "double[3]"), ("wheelPosition.inputs:input3", "double[3]"), ("wheelRadius.inputs:input1", "double"), ("wheelRadius.inputs:input2", "double"), ("wheelRadius.inputs:input3", "double"), ("jointNames.inputs:input1", "token"), ("jointNames.inputs:input2", "token"), ("jointNames.inputs:input3", "token"), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "subscribeTwist.inputs:execIn"), ("context.outputs:context", "subscribeTwist.inputs:context"), 
("subscribeTwist.outputs:angularVelocity", "breakAngVel.inputs:tuple"), ("subscribeTwist.outputs:linearVelocity", "scaleToFromStage.inputs:value"), ("scaleToFromStage.outputs:result", "breakLinVel.inputs:tuple"), ("breakAngVel.outputs:z", "angvelMult.inputs:a"), ("angvelGain.inputs:value", "angvelMult.inputs:b"), ("breakLinVel.outputs:x", "linXMult.inputs:a"), ("linXGain.inputs:value", "linXMult.inputs:b"), ("breakLinVel.outputs:y", "linYMult.inputs:a"), ("linYGain.inputs:value", "linYMult.inputs:b"), ("angvelMult.outputs:product", "velVec3.inputs:z"), ("linXMult.outputs:product", "velVec3.inputs:x"), ("linYMult.outputs:product", "velVec3.inputs:y"), ("onPlaybackTick.outputs:tick", "holonomicCtrl.inputs:execIn"), ("velVec3.outputs:tuple", "holonomicCtrl.inputs:velocityCommands"), ("mecanumAng.outputs:array", "holonomicCtrl.inputs:mecanumAngles"), ("onPlaybackTick.outputs:tick", "holonomicCtrl.inputs:execIn"), ("upAxis.inputs:value", "holonomicCtrl.inputs:upAxis"), ("wheelAxis.inputs:value", "holonomicCtrl.inputs:wheelAxis"), ("wheelOrientation.outputs:array", "holonomicCtrl.inputs:wheelOrientations"), ("wheelPosition.outputs:array", "holonomicCtrl.inputs:wheelPositions"), ("wheelRadius.outputs:array", "holonomicCtrl.inputs:wheelRadius"), ("onPlaybackTick.outputs:tick", "articulation.inputs:execIn"), ("holonomicCtrl.outputs:jointVelocityCommand", "articulation.inputs:velocityCommand"), ("jointNames.outputs:array", "articulation.inputs:jointNames"), ], }, ) og.Controller.edit( {"graph_path": "/ROS2Odom", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("computeOdom", "omni.isaac.core_nodes.IsaacComputeOdometry"), ("publishOdom", "omni.isaac.ros2_bridge.ROS2PublishOdometry"), ("publishRawTF", "omni.isaac.ros2_bridge.ROS2PublishRawTransformTree"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", self._domain_id), ("computeOdom.inputs:chassisPrim", [usdrt.Sdf.Path(self._targetPrim)]), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "computeOdom.inputs:execIn"), ("onPlaybackTick.outputs:tick", "publishOdom.inputs:execIn"), ("onPlaybackTick.outputs:tick", "publishRawTF.inputs:execIn"), ("readSimTime.outputs:simulationTime", "publishOdom.inputs:timeStamp"), ("readSimTime.outputs:simulationTime", "publishRawTF.inputs:timeStamp"), ("context.outputs:context", "publishOdom.inputs:context"), ("context.outputs:context", "publishRawTF.inputs:context"), ("computeOdom.outputs:angularVelocity", "publishOdom.inputs:angularVelocity"), ("computeOdom.outputs:linearVelocity", "publishOdom.inputs:linearVelocity"), ("computeOdom.outputs:orientation", "publishOdom.inputs:orientation"), ("computeOdom.outputs:position", "publishOdom.inputs:position"), ("computeOdom.outputs:orientation", "publishRawTF.inputs:rotation"), ("computeOdom.outputs:position", "publishRawTF.inputs:translation"), ], }, ) except Exception as e: print(e) def setup_scene(self): world = self.get_world() self._stage = omni.usd.get_context().get_stage() self.add_background() # world.scene.add_default_ground_plane() add_reference_to_stage(usd_path=self._robot_path, prim_path="/World/Summit") self._wheeled_robot = WheeledRobot( prim_path=self._targetPrim, name="my_summit", wheel_dof_names=[ "fl_joint", "fr_joint", "rl_joint", "rr_joint", ], create_robot=True, usd_path=self._robot_path, position=np.array([0, 0.0, 0.02]), 
orientation=np.array([1.0, 0.0, 0.0, 0.0]), ) self._save_count = 0 self._scene = PhysicsContext() self._scene.set_physics_dt(1 / 30.0) self.og_setup() return async def setup_post_load(self): self._world = self.get_world() self._wheeled_robot.initialize() self._world.add_physics_callback("sending_actions", callback_fn=self.send_robot_actions) return def send_robot_actions(self, step_size): self._save_count += 1 return async def setup_pre_reset(self): if self._world.physics_callback_exists("sending_actions"): self._world.remove_physics_callback("sending_actions") self._world.pause() return async def setup_post_reset(self): # driving is handled by the OmniGraph HolonomicController nodes, so there is no controller object to reset here await self._world.play_async() self._world.pause() return def world_cleanup(self): self._world.pause() return
15,753
Python
49.49359
132
0.538437
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorFactoryDemoROS2/garage_conveyor.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.isaac.core.prims.geometry_prim import GeometryPrim from omni.isaac.examples.base_sample import BaseSample # Note: checkout the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html from pxr import Sdf, UsdLux, Gf, UsdPhysics, PhysxSchema from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.rotations import euler_angles_to_quat import omni.isaac.core.utils.numpy.rotations as rot_utils from omni.isaac.core import SimulationContext from omni.isaac.sensor import Camera from .geom_utils import createRigidBody, addObjectsGeom from .inference_utils import triton_inference import omni.replicator.core as rep import omni.graph.core as og import numpy as np import random import carb import omni import cv2 PROPS = { 'spam' : "/Isaac/Props/YCB/Axis_Aligned/010_potted_meat_can.usd", 'jelly' : "/Isaac/Props/YCB/Axis_Aligned/009_gelatin_box.usd", 'tuna' : "/Isaac/Props/YCB/Axis_Aligned/007_tuna_fish_can.usd", 'cleanser' : "/Isaac/Props/YCB/Axis_Aligned/021_bleach_cleanser.usd", 'tomato_soup' : "/Isaac/Props/YCB/Axis_Aligned/005_tomato_soup_can.usd" } class GarageConveyor(BaseSample): def __init__(self) -> None: super().__init__() carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._nucleus_server = get_assets_root_path() self._bin_path = self._nucleus_server + "/Isaac/Props/KLT_Bin/small_KLT_visual.usd" self._bin_mass = 10.5 self._bin_scale = np.array([2.0, 2.0, 1.0]) self._bin_position = np.array([7.0, -0.2, 1.0]) self._plane_scale = np.array([0.4, 0.24, 1.0]) self._plane_position = np.array([-1.75, 1.2, 0.9]) self._plane_rotation = np.array([0.0, 0.0, 0.0]) self._gemini_usd_path = self._server_root + "/NVIDIA/Assets/Isaac/2023.1.1/Isaac/Sensors/Orbbec/Gemini 2/orbbec_gemini2_V1.0.usd" self._gemini_position = np.array([-1.75, 1.2, 1.5]) self._gemini_rotation = np.array([0.0, 0.7071068, -0.7071068, 0]) self._sim_count = 0 self._is_captured = False return def add_background(self): self._world = self.get_world() bg_path = self._server_root + "/Projects/RBROS2/ConveyorGarage/Franka_Garage_Empty.usd" add_reference_to_stage(usd_path=bg_path, prim_path=f"/World/Garage") def add_camera(self): self._camera = Camera( prim_path="/World/normal_camera", position=np.array([-1.75, 1.2, 2.0]), frequency=30, resolution=(1280, 720), orientation=rot_utils.euler_angles_to_quats( np.array([ 0, 90, 180 ]), degrees=True), ) self._camera.set_focal_length(2.0) self._camera.initialize() self._camera.add_motion_vectors_to_frame() def add_ros_camera(self, scene): add_reference_to_stage(usd_path=self._gemini_usd_path, prim_path=f"/World/Orbbec_Gemini2") self._cam_ref_geom = addObjectsGeom( scene, "Orbbec_Gemini2", np.array([1.0, 1.0, 1.0]), self._gemini_position, 0.0, self._gemini_rotation ) 
ldm_light = self._stage.GetPrimAtPath("/World/Orbbec_Gemini2/Orbbec_Gemini2/camera_ldm/camera_ldm/RectLight") ldm_light_intensity = ldm_light.GetAttribute("intensity") ldm_light_intensity.Set(0) def og_setup(self): camprim1 = "/World/Orbbec_Gemini2/Orbbec_Gemini2/camera_ir_left/camera_left/Stream_depth" camprim2 = "/World/Orbbec_Gemini2/Orbbec_Gemini2/camera_rgb/camera_rgb/Stream_rgb" try: og.Controller.edit( {"graph_path": "/GeminiROS2OG", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("RenderProduct1", "omni.isaac.core_nodes.IsaacCreateRenderProduct"), ("RenderProduct2", "omni.isaac.core_nodes.IsaacCreateRenderProduct"), ("RGBPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ("CameraInfoPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ], og.Controller.Keys.SET_VALUES: [ ("RenderProduct1.inputs:cameraPrim", camprim1), ("RenderProduct2.inputs:cameraPrim", camprim2), ("RGBPublish.inputs:topicName", "rgb"), ("RGBPublish.inputs:type", "rgb"), ("RGBPublish.inputs:resetSimulationTimeOnStop", True), ("CameraInfoPublish.inputs:topicName", "depth_camera_info"), ("CameraInfoPublish.inputs:type", "camera_info"), ("CameraInfoPublish.inputs:resetSimulationTimeOnStop", True), ], og.Controller.Keys.CONNECT: [ ("OnPlaybackTick.outputs:tick", "RenderProduct1.inputs:execIn"), ("OnPlaybackTick.outputs:tick", "RenderProduct2.inputs:execIn"), ("RenderProduct1.outputs:execOut", "CameraInfoPublish.inputs:execIn"), ("RenderProduct2.outputs:execOut", "RGBPublish.inputs:execIn"), ("RenderProduct1.outputs:renderProductPath", "CameraInfoPublish.inputs:renderProductPath"), ("RenderProduct2.outputs:renderProductPath", "RGBPublish.inputs:renderProductPath"), ], }, ) except Exception as e: print(e) def add_bin(self, scene): add_reference_to_stage(usd_path=self._bin_path, prim_path="/World/inference_bin") createRigidBody(self._stage, "/World/inference_bin", False) self._bin_ref_geom = addObjectsGeom( scene, "inference_bin", self._bin_scale, self._bin_position, self._bin_mass, orientation=None ) def add_random_objects(self, scene, num_objects=3): choicelist = [random.choice( list(PROPS.keys()) ) for i in range(num_objects)] for _object in choicelist: prim_path = self._nucleus_server + PROPS[_object] prim_name = f"{_object}_{random.randint(0, 100)}" add_reference_to_stage(usd_path=prim_path, prim_path=f"/World/{prim_name}") createRigidBody(self._stage, f"/World/{prim_name}") position = ( random.uniform(6.8, 7.05), random.uniform(-0.3, -0.1), random.uniform(1.1, 1.3) ) prim_geom = addObjectsGeom( scene, prim_name, np.array([1.0, 1.0, 1.0]), position, 0.02 ) def add_light(self): distantLight = UsdLux.CylinderLight.Define(self._stage, Sdf.Path("/World/cylinderLight")) distantLight.CreateIntensityAttr(60000) distantLight.AddTranslateOp().Set(Gf.Vec3f(-1.2, 0.9, 3.0)) distantLight.AddScaleOp().Set((0.1, 4.0, 0.1)) distantLight.AddRotateXYZOp().Set((0, 0, 90)) def setup_scene(self): self._world = self.get_world() self._stage = omni.usd.get_context().get_stage() self.simulation_context = SimulationContext() self.add_background() self.add_light() self.add_camera() self.add_bin(self._world.scene) self.add_random_objects(self._world.scene, num_objects=3) # Uncomment belows if you wanna activate ROS 2 topic publishing # self.add_ros_camera(self._world.scene) # self.og_setup() self._scene = PhysicsContext() self._scene.set_physics_dt(1 / 30.0) return async def setup_post_load(self): self._world = self.get_world() 
self._world.scene.enable_bounding_boxes_computations() self._world.add_physics_callback("sim_step", callback_fn=self.physics_callback) #callback names have to be unique self._cur_bin_position, _ = self._bin_ref_geom.get_world_pose() self._prev_bin_position = self._cur_bin_position return def physics_callback(self, step_size): self._cur_bin_position, _ = self._bin_ref_geom.get_world_pose() bin_vel = np.linalg.norm(self._cur_bin_position - self._prev_bin_position) if self._cur_bin_position[1] > 1.1 and bin_vel < 1e-5 and not self._is_captured: print("capture image...") self._is_captured = True self._camera.get_current_frame() cur_img = self._camera.get_rgba()[:, :, :3] # TEST # target_width, target_height = 1280, 720 # image_bgr = cv2.resize(cur_img, (target_width, target_height)) # image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB) # cv2.imwrite("/home/kimsooyoung/Documents/test_img.png", image_rgb) triton_inference(cur_img) else: # print(f"bin_vel: {bin_vel} / self._is_captured : {self._is_captured} / self._cur_bin_position[1]: {self._cur_bin_position[1]}") pass self._sim_count += 1 self._prev_bin_position = self._cur_bin_position async def setup_post_reset(self): await self._world.play_async() return def world_cleanup(self): self._world.pause() return
10,350
Python
41.772727
141
0.603671
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorFactoryDemoROS2/geom_utils.py
from omni.physx.scripts import utils from omni.isaac.core.prims.geometry_prim import GeometryPrim from pxr import Gf, PhysxSchema, Usd, UsdPhysics, UsdShade, UsdGeom, Sdf, Tf, UsdLux import numpy as np def createRigidBody(stage, primPath, kinematic=False): bodyPrim = stage.GetPrimAtPath(primPath) utils.setRigidBody(bodyPrim, "convexDecomposition", kinematic) def addObjectsGeom(scene, name, scale, ini_pos, mass, orientation=None): scene.add(GeometryPrim(prim_path=f"/World/{name}", name=f"{name}_ref_geom", collision=True)) geom = scene.get_object(f"{name}_ref_geom") if orientation is None: # Usually - (x, y, z, w) # But in Isaac Sim - (w, x, y, z) orientation = np.array([1.0, 0.0, 0.0, 0.0]) geom.set_local_scale(scale) geom.set_world_pose(position=ini_pos) geom.set_collision_approximation("convexDecomposition") geom.set_default_state(position=ini_pos, orientation=orientation) massAPI = UsdPhysics.MassAPI.Apply(geom.prim.GetPrim()) massAPI.CreateMassAttr().Set(mass) return geom
1,072
Python
37.321427
96
0.70709
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/WheeledRobotLimoDiffROS2/limo_diff_drive.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root import omni.graph.core as og import numpy as np import usdrt.Sdf import carb class LimoDiffDrive(BaseSample): def __init__(self) -> None: super().__init__() carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) # self._robot_path = self._server_root + "/Projects/RBROS2/WheeledRobot/limo_base.usd" self._robot_path = self._server_root + "/Projects/RBROS2/WheeledRobot/limo_diff_thin.usd" self._domain_id = 30 self._maxLinearSpeed = 1e6 self._wheelDistance = 0.43 self._wheelRadius = 0.045 self._front_jointNames = ["rear_left_wheel", "rear_right_wheel"] self._rear_jointNames = ["front_left_wheel", "front_right_wheel"] self._contorl_targetPrim = "/World/Limo/base_link" self._odom_targetPrim = "/World/Limo/base_footprint" return def og_setup(self): try: # OG reference : https://docs.omniverse.nvidia.com/isaacsim/latest/ros2_tutorials/tutorial_ros2_drive_turtlebot.html og.Controller.edit( {"graph_path": "/ROS2DiffDrive", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("subscribeTwist", "omni.isaac.ros2_bridge.ROS2SubscribeTwist"), ("scaleToFromStage", "omni.isaac.core_nodes.OgnIsaacScaleToFromStageUnit"), ("breakLinVel", "omni.graph.nodes.BreakVector3"), ("breakAngVel", "omni.graph.nodes.BreakVector3"), ("diffController", "omni.isaac.wheeled_robots.DifferentialController"), ("artControllerRear", "omni.isaac.core_nodes.IsaacArticulationController"), ("artControllerFront", "omni.isaac.core_nodes.IsaacArticulationController"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", self._domain_id), ("diffController.inputs:maxLinearSpeed", self._maxLinearSpeed), ("diffController.inputs:wheelDistance", self._wheelDistance), ("diffController.inputs:wheelRadius", self._wheelRadius), ("artControllerRear.inputs:jointNames", self._front_jointNames), ("artControllerRear.inputs:targetPrim", [usdrt.Sdf.Path(self._contorl_targetPrim)]), ("artControllerRear.inputs:usePath", False), ("artControllerFront.inputs:jointNames", self._rear_jointNames), ("artControllerFront.inputs:targetPrim", [usdrt.Sdf.Path(self._contorl_targetPrim)]), ("artControllerFront.inputs:usePath", False), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "subscribeTwist.inputs:execIn"), ("onPlaybackTick.outputs:tick", "artControllerRear.inputs:execIn"), ("onPlaybackTick.outputs:tick", "artControllerFront.inputs:execIn"), ("context.outputs:context", "subscribeTwist.inputs:context"), ("subscribeTwist.outputs:execOut", "diffController.inputs:execIn"), ("subscribeTwist.outputs:angularVelocity", "breakAngVel.inputs:tuple"), 
("subscribeTwist.outputs:linearVelocity", "scaleToFromStage.inputs:value"), ("scaleToFromStage.outputs:result", "breakLinVel.inputs:tuple"), ("breakAngVel.outputs:z", "diffController.inputs:angularVelocity"), ("breakLinVel.outputs:x", "diffController.inputs:linearVelocity"), ("diffController.outputs:velocityCommand", "artControllerRear.inputs:velocityCommand"), ("diffController.outputs:velocityCommand", "artControllerFront.inputs:velocityCommand"), ], }, ) # OG reference : https://docs.omniverse.nvidia.com/isaacsim/latest/ros2_tutorials/tutorial_ros2_tf.html og.Controller.edit( {"graph_path": "/ROS2Odom", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("computeOdom", "omni.isaac.core_nodes.IsaacComputeOdometry"), ("publishOdom", "omni.isaac.ros2_bridge.ROS2PublishOdometry"), ("publishRawTF", "omni.isaac.ros2_bridge.ROS2PublishRawTransformTree"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", self._domain_id), ("computeOdom.inputs:chassisPrim", [usdrt.Sdf.Path(self._odom_targetPrim)]), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "computeOdom.inputs:execIn"), ("onPlaybackTick.outputs:tick", "publishOdom.inputs:execIn"), ("onPlaybackTick.outputs:tick", "publishRawTF.inputs:execIn"), ("readSimTime.outputs:simulationTime", "publishOdom.inputs:timeStamp"), ("readSimTime.outputs:simulationTime", "publishRawTF.inputs:timeStamp"), ("context.outputs:context", "publishOdom.inputs:context"), ("context.outputs:context", "publishRawTF.inputs:context"), ("computeOdom.outputs:angularVelocity", "publishOdom.inputs:angularVelocity"), ("computeOdom.outputs:linearVelocity", "publishOdom.inputs:linearVelocity"), ("computeOdom.outputs:orientation", "publishOdom.inputs:orientation"), ("computeOdom.outputs:position", "publishOdom.inputs:position"), ("computeOdom.outputs:orientation", "publishRawTF.inputs:rotation"), ("computeOdom.outputs:position", "publishRawTF.inputs:translation"), ], }, ) except Exception as e: print(e) def setup_scene(self): world = self.get_world() world.scene.add_default_ground_plane() add_reference_to_stage(usd_path=self._robot_path, prim_path="/World/Limo") self._save_count = 0 self.og_setup() return async def setup_post_load(self): self._world = self.get_world() self._world.add_physics_callback("sending_actions", callback_fn=self.send_robot_actions) return def send_robot_actions(self, step_size): self._save_count += 1 return async def setup_pre_reset(self): if self._world.physics_callback_exists("sim_step"): self._world.remove_physics_callback("sim_step") self._world.pause() return async def setup_post_reset(self): self._summit_controller.reset() await self._world.play_async() self._world.pause() return def world_cleanup(self): self._world.pause() return
8,504
Python
52.15625
128
0.589135
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/SurfaceGripper/surface_gripper.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import asyncio import weakref import numpy as np import omni import omni.ext import omni.kit.commands import omni.kit.usd import omni.physx as _physx import omni.ui as ui from omni.isaac.core.utils.viewports import set_camera_view from omni.isaac.dynamic_control import _dynamic_control as dc # Import extension python module we are testing with absolute import path, as if we are external user (other extension) from omni.isaac.surface_gripper._surface_gripper import Surface_Gripper, Surface_Gripper_Properties from omni.isaac.ui.menu import make_menu_item_description from omni.isaac.ui.ui_utils import ( add_separator, btn_builder, combo_floatfield_slider_builder, get_style, setup_ui_headers, state_btn_builder, ) from omni.kit.menu.utils import MenuItemDescription, add_menu_items, remove_menu_items from pxr import Gf, Sdf, UsdGeom, UsdLux, UsdPhysics import omni.isaac.core.utils.stage as stage_utils from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.physx.scripts import deformableUtils, physicsUtils import carb EXTENSION_NAME = "SurfaceGripper" class Extension(omni.ext.IExt): def on_startup(self, ext_id: str): """Initialize extension and UI elements""" self._ext_id = ext_id # Loads interfaces self._timeline = omni.timeline.get_timeline_interface() self._dc = dc.acquire_dynamic_control_interface() self._usd_context = omni.usd.get_context() self._window = None self._models = {} # Creates UI window with default size of 600x300 # self._window = omni.ui.Window( # title=EXTENSION_NAME, width=300, height=200, visible=False, dockPreference=ui.DockPreference.LEFT_BOTTOM # ) menu_items = [ make_menu_item_description(ext_id, EXTENSION_NAME, lambda a=weakref.proxy(self): a._menu_callback()) ] self._menu_items = [MenuItemDescription(name="RoadBalanceEdu", sub_menu=menu_items)] add_menu_items(self._menu_items, "Isaac Examples") self._build_ui() self.surface_gripper = None self.cone = None self.box = None self._stage_id = -1 def _build_ui(self): if not self._window: self._window = ui.Window( title=EXTENSION_NAME, width=0, height=0, visible=False, dockPreference=ui.DockPreference.LEFT_BOTTOM ) self._window.set_visibility_changed_fn(self._on_window) with self._window.frame: with ui.VStack(spacing=5, height=0): title = "Surface Gripper Example" doc_link = "https://docs.omniverse.nvidia.com/isaacsim/latest/features/robots_simulation/ext_omni_isaac_surface_gripper.html" overview = "This Example shows how to simulate a suction-cup gripper in Isaac Sim. " overview += "It simulates suction by creating a Joint between two bodies when the parent and child bodies are close at the gripper's point of contact." overview += "\n\nPress the 'Open in IDE' button to view the source code." 
setup_ui_headers(self._ext_id, __file__, title, doc_link, overview) frame = ui.CollapsableFrame( title="Command Panel", height=0, collapsed=False, style=get_style(), style_type_name_override="CollapsableFrame", horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED, vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON, ) with frame: with ui.VStack(style=get_style(), spacing=5): args = { "label": "Load Scene", "type": "button", "text": "Load", "tooltip": "Load a gripper into the Scene", "on_clicked_fn": self._on_create_scenario_button_clicked, } self._models["create_button"] = btn_builder(**args) args = { "label": "Gripper State", "type": "button", "a_text": "Close", "b_text": "Open", "tooltip": "Open and Close the Gripper", "on_clicked_fn": self._on_toggle_gripper_button_clicked, } self._models["toggle_button"] = state_btn_builder(**args) add_separator() args = { "label": "Gripper Force (UP)", "default_val": 0, "min": 0, "max": 1.0e2, "step": 1, "tooltip": ["Force in ()", "Force in ()"], } self._models["force_slider"], slider = combo_floatfield_slider_builder(**args) args = { "label": "Set Force", "type": "button", "text": "APPLY", "tooltip": "Apply the Gripper Force to the Z-Axis of the Cone", "on_clicked_fn": self._on_force_button_clicked, } self._models["force_button"] = btn_builder(**args) args = { "label": "Gripper Speed (UP)", "default_val": 0, "min": 0, "max": 5.0e1, "step": 1, "tooltip": ["Speed in ()", "Speed in ()"], } add_separator() self._models["speed_slider"], slider = combo_floatfield_slider_builder(**args) args = { "label": "Set Speed", "type": "button", "text": "APPLY", "tooltip": "Apply Cone Velocity in the Z-Axis", "on_clicked_fn": self._on_speed_button_clicked, } self._models["speed_button"] = btn_builder(**args) ui.Spacer() def on_shutdown(self): remove_menu_items(self._menu_items, "Isaac Examples") self._physx_subs = None self._window = None def _on_window(self, status): if status: self._usd_context = omni.usd.get_context() if self._usd_context is not None: self._stage_event_sub = ( omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update_ui) ) else: self._stage_event_sub = None self._physx_subs = None def _menu_callback(self): self._window.visible = not self._window.visible def _on_update_ui(self, widget): self._models["create_button"].enabled = self._timeline.is_playing() self._models["toggle_button"].enabled = self._timeline.is_playing() self._models["force_button"].enabled = self._timeline.is_playing() self._models["speed_button"].enabled = self._timeline.is_playing() # If the scene has been reloaded, reset UI to create Scenario if self._usd_context.get_stage_id() != self._stage_id: self._models["create_button"].enabled = True # self._models["create_button"].text = "Create Scenario" self._models["create_button"].set_tooltip("Creates a new scenario with the cone on top of the Cube") self._models["create_button"].set_clicked_fn(self._on_create_scenario_button_clicked) self.cone = None self.box = None self._stage_id = -1 def _toggle_gripper_button_ui(self): # Checks if the surface gripper has been created if self.surface_gripper is not None: if self.surface_gripper.is_closed(): self._models["toggle_button"].text = "OPEN" else: self._models["toggle_button"].text = "CLOSE" pass def _on_simulation_step(self, step): # Checks if the simulation is playing, and if the stage has been loaded if self._timeline.is_playing() and self._stage_id != -1: # Check if the handles for cone and box have been loaded if self.cone 
is None: # self.cone = self._dc.get_rigid_body("/GripperCone") self.cone = self._dc.get_rigid_body("/mirobot_ee/Link6") self.box = self._dc.get_rigid_body("/Box") # If the surface Gripper has been created, update wheter it has been broken or not if self.surface_gripper is not None: self.surface_gripper.update() # if self.surface_gripper.is_closed(): # self.coneGeom.GetDisplayColorAttr().Set([self.color_closed]) # else: # self.coneGeom.GetDisplayColorAttr().Set([self.color_open]) self._toggle_gripper_button_ui() def _on_reset_scenario_button_clicked(self): if self._timeline.is_playing() and self._stage_id != -1: if self.surface_gripper is not None: self.surface_gripper.open() self._dc.set_rigid_body_linear_velocity(self.cone, [0, 0, 0]) self._dc.set_rigid_body_linear_velocity(self.box, [0, 0, 0]) self._dc.set_rigid_body_angular_velocity(self.cone, [0, 0, 0]) self._dc.set_rigid_body_angular_velocity(self.box, [0, 0, 0]) self._dc.set_rigid_body_pose(self.cone, self.gripper_start_pose) self._dc.set_rigid_body_pose(self.box, self.box_start_pose) async def _create_scenario(self, task): done, pending = await asyncio.wait({task}) if task in done: # Repurpose button to reset Scene # self._models["create_button"].text = "Reset Scene" self._models["create_button"].set_tooltip("Resets scenario with the cone on top of the Cube") # Get Handle for stage and stage ID to check if stage was reloaded self._stage = self._usd_context.get_stage() self._stage_id = self._usd_context.get_stage_id() self._timeline.stop() self._models["create_button"].set_clicked_fn(self._on_reset_scenario_button_clicked) # Adds a light to the scene distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/DistantLight")) distantLight.CreateIntensityAttr(500) distantLight.AddOrientOp().Set(Gf.Quatf(-0.3748, -0.42060, -0.0716, 0.823)) # Set up stage with Z up, treat units as cm, set up gravity and ground plane UsdGeom.SetStageUpAxis(self._stage, UsdGeom.Tokens.z) UsdGeom.SetStageMetersPerUnit(self._stage, 1.0) self.scene = UsdPhysics.Scene.Define(self._stage, Sdf.Path("/physicsScene")) self.scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0)) self.scene.CreateGravityMagnitudeAttr().Set(9.81) omni.kit.commands.execute( "AddGroundPlaneCommand", stage=self._stage, planePath="/groundPlane", axis="Z", size=10.000, position=Gf.Vec3f(0), color=Gf.Vec3f(0.5), ) # Colors to represent when gripper is open or closed self.color_closed = Gf.Vec3f(1.0, 0.2, 0.2) self.color_open = Gf.Vec3f(0.2, 1.0, 0.2) # Cone that will represent the gripper carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._ee_path = self._server_root + "/Projects/RBROS2/mirobot_ros2/mirobot_description/urdf/mirobot_urdf_2/Link6.usd" add_reference_to_stage(usd_path=self._ee_path, prim_path="/mirobot_ee") ee_mesh = UsdGeom.Mesh.Get(self._stage, "/mirobot_ee") physicsUtils.set_or_add_translate_op(ee_mesh, translate=Gf.Vec3f(0.0, 0.0, 0.30)) self.gripper_start_pose = dc.Transform([0.0, 0.0, 0.30], [1, 0, 0, 0]) # Box to be picked self.box_start_pose = dc.Transform([0, 0, 0.10], [1, 0, 0, 0]) self.boxGeom = self.createRigidBody( UsdGeom.Cube, "/Box", 0.0010, [0.01, 0.01, 0.01], self.box_start_pose.p, self.box_start_pose.r, [0.2, 0.2, 1] ) # Reordering the quaternion to follow DC convention for later use. 
self.gripper_start_pose = dc.Transform([0, 0, 0.301], [0, 0, 0, 1]) self.box_start_pose = dc.Transform([0, 0, 0.10], [0, 0, 0, 1]) # Gripper properties self.sgp = Surface_Gripper_Properties() self.sgp.d6JointPath = "/mirobot_ee/Link6/SurfaceGripper" self.sgp.parentPath = "/mirobot_ee/Link6" self.sgp.offset = dc.Transform() self.sgp.offset.p.x = 0 self.sgp.offset.p.z = -0.02947 # 0, 1.5707963, 0 in Euler angles self.sgp.offset.r = [0.7071, 0, 0.7071, 0] # Rotate to point gripper in Z direction self.sgp.gripThreshold = 0.02 self.sgp.forceLimit = 1.0e2 self.sgp.torqueLimit = 1.0e3 self.sgp.bendAngle = np.pi / 4 self.sgp.stiffness = 1.0e4 self.sgp.damping = 1.0e3 self.surface_gripper = Surface_Gripper(self._dc) self.surface_gripper.initialize(self.sgp) # Set camera to a nearby pose and looking directly at the Gripper cone set_camera_view( eye=[4.00, 4.00, 4.00], target=self.gripper_start_pose.p, camera_prim_path="/OmniverseKit_Persp" ) self._physx_subs = _physx.get_physx_interface().subscribe_physics_step_events(self._on_simulation_step) self._timeline.play() def _on_create_scenario_button_clicked(self): # wait for new stage before creating scenario task = asyncio.ensure_future(omni.usd.get_context().new_stage_async()) asyncio.ensure_future(self._create_scenario(task)) def _on_toggle_gripper_button_clicked(self, val=False): if self._timeline.is_playing(): print(f"self.surface_gripper : {self.surface_gripper}") print(f"self.surface_gripper.is_closed() : {self.surface_gripper.is_closed()}") if self.surface_gripper.is_closed(): self.surface_gripper.open() else: self.surface_gripper.close() if self.surface_gripper.is_closed(): self._models["toggle_button"].text = "OPEN" else: self._models["toggle_button"].text = "CLOSE" def _on_speed_button_clicked(self): if self._timeline.is_playing(): self._dc.set_rigid_body_linear_velocity( self.cone, [0, 0, self._models["speed_slider"].get_value_as_float()] ) def _on_force_button_clicked(self): if self._timeline.is_playing(): self._dc.apply_body_force( self.cone, [0, 0, self._models["force_slider"].get_value_as_float()], [0, 0, 0], True ) def createRigidBody(self, bodyType, boxActorPath, mass, scale, position, rotation, color): p = Gf.Vec3f(position[0], position[1], position[2]) orientation = Gf.Quatf(rotation[0], rotation[1], rotation[2], rotation[3]) scale = Gf.Vec3f(scale[0], scale[1], scale[2]) bodyGeom = bodyType.Define(self._stage, boxActorPath) bodyPrim = self._stage.GetPrimAtPath(boxActorPath) bodyGeom.AddTranslateOp().Set(p) bodyGeom.AddOrientOp().Set(orientation) bodyGeom.AddScaleOp().Set(scale) bodyGeom.CreateDisplayColorAttr().Set([color]) UsdPhysics.CollisionAPI.Apply(bodyPrim) if mass > 0: massAPI = UsdPhysics.MassAPI.Apply(bodyPrim) massAPI.CreateMassAttr(mass) UsdPhysics.RigidBodyAPI.Apply(bodyPrim) UsdPhysics.CollisionAPI(bodyPrim) print(bodyPrim.GetPath().pathString) return bodyGeom
17,570
Python
45.607427
171
0.552703
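For reference, the suction behaviour in the extension above boils down to filling a Surface_Gripper_Properties struct and polling update() every physics step so the joint can break under load. A minimal usage sketch, assuming a stage that already contains a rigid body at /Gripper/body and a simulation that is playing; the paths and limits below are illustrative placeholders, not values taken from the example.

# Minimal Surface_Gripper sketch (paths and limits are placeholders, not the example's values).
from omni.isaac.dynamic_control import _dynamic_control as dc
from omni.isaac.surface_gripper._surface_gripper import Surface_Gripper, Surface_Gripper_Properties

dci = dc.acquire_dynamic_control_interface()

sgp = Surface_Gripper_Properties()
sgp.d6JointPath = "/Gripper/body/SurfaceGripper"  # joint created on demand when the gripper closes
sgp.parentPath = "/Gripper/body"                  # rigid body that carries the suction cup
sgp.offset = dc.Transform()                       # cup pose relative to the parent body
sgp.offset.p.z = -0.03
sgp.gripThreshold = 0.02                          # max parent/child gap for a grip to form
sgp.forceLimit = 1.0e2                            # joint breaks above this force
sgp.torqueLimit = 1.0e3
sgp.stiffness = 1.0e4
sgp.damping = 1.0e3

gripper = Surface_Gripper(dci)
gripper.initialize(sgp)

def on_physics_step(step):
    gripper.update()              # re-evaluates whether the suction joint should break
    if not gripper.is_closed():
        gripper.close()           # tries to latch onto the nearest body within gripThreshold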
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/HelloMultiTask/hello_multi_task.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.franka.tasks import PickPlace from omni.isaac.franka.controllers import PickPlaceController from omni.isaac.wheeled_robots.robots import WheeledRobot from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.wheeled_robots.controllers import WheelBasePoseController from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController from omni.isaac.core.tasks import BaseTask from omni.isaac.core.utils.types import ArticulationAction # Find a unique string name, to use it for prim paths and scene names from omni.isaac.core.utils.string import find_unique_string_name # Creates a unique prim path from omni.isaac.core.utils.prims import is_prim_path_valid # Checks if a prim path is valid from omni.isaac.core.objects.cuboid import VisualCuboid import numpy as np class RobotsPlaying(BaseTask): def __init__(self, name, offset=None): super().__init__(name=name, offset=offset) self._task_event = 0 # Randomize the task a bit self._jetbot_goal_position = np.array([np.random.uniform(1.2, 1.6), 0.3, 0]) + self._offset self._pick_place_task = PickPlace(cube_initial_position=np.array([0.1, 0.3, 0.05]), target_position=np.array([0.7, -0.3, 0.0515 / 2.0]), offset=offset) return def set_up_scene(self, scene): super().set_up_scene(scene) self._pick_place_task.set_up_scene(scene) jetbot_name = find_unique_string_name( initial_name="fancy_jetbot", is_unique_fn=lambda x: not self.scene.object_exists(x) ) jetbot_prim_path = find_unique_string_name( initial_name="/World/Fancy_Jetbot", is_unique_fn=lambda x: not is_prim_path_valid(x) ) assets_root_path = get_assets_root_path() jetbot_asset_path = assets_root_path + "/Isaac/Robots/Jetbot/jetbot.usd" self._jetbot = scene.add( WheeledRobot( prim_path=jetbot_prim_path, name=jetbot_name, wheel_dof_names=["left_wheel_joint", "right_wheel_joint"], create_robot=True, usd_path=jetbot_asset_path, position=np.array([0, 0.3, 0]), ) ) self._task_objects[self._jetbot.name] = self._jetbot pick_place_params = self._pick_place_task.get_params() self._franka = scene.get_object(pick_place_params["robot_name"]["value"]) current_position, _ = self._franka.get_world_pose() self._franka.set_world_pose(position=current_position + np.array([1.0, 0, 0])) self._franka.set_default_state(position=current_position + np.array([1.0, 0, 0])) self._move_task_objects_to_their_frame() return def get_observations(self): current_jetbot_position, current_jetbot_orientation = self._jetbot.get_world_pose() observations= { self.name + "_event": self._task_event, #change task event to make it unique self._jetbot.name: { "position": current_jetbot_position, "orientation": current_jetbot_orientation, "goal_position": self._jetbot_goal_position } } observations.update(self._pick_place_task.get_observations()) return observations def get_params(self): pick_place_params = self._pick_place_task.get_params() params_representation = pick_place_params params_representation["jetbot_name"] = {"value": self._jetbot.name, "modifiable": False} params_representation["franka_name"] 
= pick_place_params["robot_name"] return params_representation def pre_step(self, control_index, simulation_time): if self._task_event == 0: current_jetbot_position, _ = self._jetbot.get_world_pose() if np.mean(np.abs(current_jetbot_position[:2] - self._jetbot_goal_position[:2])) < 0.04: self._task_event += 1 self._cube_arrive_step_index = control_index elif self._task_event == 1: if control_index - self._cube_arrive_step_index == 200: self._task_event += 1 return def post_reset(self): self._franka.gripper.set_joint_positions(self._franka.gripper.joint_opened_positions) self._task_event = 0 return class HelloMultiTask(BaseSample): def __init__(self) -> None: super().__init__() # Add lists for tasks, self._tasks = [] self._num_of_tasks = 3 # Add lists for controllers self._franka_controllers = [] self._jetbot_controllers = [] # Add lists for variables needed for control self._jetbots = [] self._frankas = [] self._cube_names = [] return def setup_scene(self): world = self.get_world() for i in range(self._num_of_tasks): world.add_task(RobotsPlaying(name="my_awesome_task_" + str(i), offset=np.array([0, (i * 2) - 3, 0]))) return async def setup_post_load(self): self._world = self.get_world() for i in range(self._num_of_tasks): self._tasks.append(self._world.get_task(name="my_awesome_task_" + str(i))) # Get variables needed for control task_params = self._tasks[i].get_params() self._frankas.append(self._world.scene.get_object(task_params["franka_name"]["value"])) self._jetbots.append(self._world.scene.get_object(task_params["jetbot_name"]["value"])) self._cube_names.append(task_params["cube_name"]["value"]) # Define controllers self._franka_controllers.append(PickPlaceController(name="pick_place_controller", gripper=self._frankas[i].gripper, robot_articulation=self._frankas[i], # Change the default events dt of the # pick and place controller to slow down some of the transitions # to pick up further blocks # Note: this is a simple pick and place state machine # based on events dt and not event success # check the different events description in the api # documentation events_dt=[0.008, 0.002, 0.5, 0.1, 0.05, 0.05, 0.0025, 1, 0.008, 0.08])) self._jetbot_controllers.append(WheelBasePoseController(name="cool_controller", open_loop_wheel_controller= DifferentialController(name="simple_control", wheel_radius=0.03, wheel_base=0.1125))) self._world.add_physics_callback("sim_step", callback_fn=self.physics_step) await self._world.play_async() return async def setup_post_reset(self): for i in range(len(self._tasks)): # Reset all controllers self._franka_controllers[i].reset() self._jetbot_controllers[i].reset() await self._world.play_async() return def physics_step(self, step_size): current_observations = self._world.get_observations() for i in range(len(self._tasks)): # Apply actions for each task if current_observations[self._tasks[i].name + "_event"] == 0: self._jetbots[i].apply_wheel_actions( self._jetbot_controllers[i].forward( start_position=current_observations[self._jetbots[i].name]["position"], start_orientation=current_observations[self._jetbots[i].name]["orientation"], goal_position=current_observations[self._jetbots[i].name]["goal_position"])) elif current_observations[self._tasks[i].name + "_event"] == 1: self._jetbots[i].apply_wheel_actions(ArticulationAction(joint_velocities=[-8.0, -8.0])) elif current_observations[self._tasks[i].name + "_event"] == 2: self._jetbots[i].apply_wheel_actions(ArticulationAction(joint_velocities=[0.0, 0.0])) actions = self._franka_controllers[i].forward( 
picking_position=current_observations[self._cube_names[i]]["position"], placing_position=current_observations[self._cube_names[i]]["target_position"], current_joint_positions=current_observations[self._frankas[i].name]["joint_positions"]) self._frankas[i].apply_action(actions) return # This function is called after a hot reload or a clear # to delete the variables defined in this extension application def world_cleanup(self): self._tasks = [] self._franka_controllers = [] self._jetbot_controllers = [] self._jetbots = [] self._frankas = [] self._cube_names = [] return
10,052
Python
51.088083
136
0.575308
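The multi-task pattern above relies on two conventions: every task instance gets a spatial offset, and every observation key is prefixed (or otherwise made unique) per task so that parallel instances never collide in the shared observation dictionary. A dependency-light sketch of that keying scheme follows; the robot name and offsets are made up for illustration and do not reproduce the unique-name lookup used in the example.

import numpy as np

def make_task_observations(task_name, task_event, jetbot_pose, goal):
    # Mirrors the keying convention in RobotsPlaying.get_observations():
    # one "<task>_event" entry plus a per-robot sub-dictionary.
    return {
        f"{task_name}_event": task_event,
        f"fancy_jetbot_{task_name}": {          # hypothetical per-task robot key
            "position": jetbot_pose,
            "goal_position": goal,
        },
    }

observations = {}
for i in range(3):
    offset = np.array([0.0, i * 2.0 - 3.0, 0.0])   # same spacing as the example's task offsets
    observations.update(
        make_task_observations(f"my_awesome_task_{i}", 0,
                               np.zeros(3) + offset,
                               np.array([1.4, 0.3, 0.0]) + offset)
    )
print(sorted(observations.keys()))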
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/HelloLight/hello_light.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample import numpy as np import omni.usd # Note: checkout the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html from omni.isaac.core.objects import DynamicCuboid from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf, UsdLux class HelloLight(BaseSample): def __init__(self) -> None: super().__init__() return def setup_scene(self): world = self.get_world() world.scene.add_default_ground_plane() fancy_cube = world.scene.add( DynamicCuboid( prim_path="/World/random_cube", # The prim path of the cube in the USD stage name="fancy_cube", # The unique name used to retrieve the object from the scene later on position=np.array([0, 0, 1.0]), # Using the current stage units which is in meters by default. scale=np.array([0.5015, 0.5015, 0.5015]), # most arguments accept mainly numpy arrays. color=np.array([0, 0, 1.0]), # RGB channels, going from 0-1 )) stage = omni.usd.get_context().get_stage() ## Create a Sphere light # sphereLight = UsdLux.SphereLight.Define(stage, Sdf.Path("/World/SphereLight")) # sphereLight.CreateRadiusAttr(150) # sphereLight.CreateIntensityAttr(30000) # sphereLight.AddTranslateOp().Set(Gf.Vec3f(650.0, 0.0, 1150.0)) ## Create a distant light distantLight = UsdLux.DistantLight.Define(stage, Sdf.Path("/World/distantLight")) distantLight.CreateIntensityAttr(1000) ## Rotatation and translation of the light # distantLight.AddRotateXYZOp().Set((-36, 36, 0)) # distantLight.AddTranslateOp().Set(Gf.Vec3f(650.0, 0.0, 1150.0)) return # Here we assign the class's variables # this function is called after load button is pressed # regardless starting from an empty stage or not # this is called after setup_scene and after # one physics time step to propagate appropriate # physics handles which are needed to retrieve # many physical properties of the different objects async def setup_post_load(self): self._world = self.get_world() self._cube = self._world.scene.get_object("fancy_cube") self._world.add_physics_callback("sim_step", callback_fn=self.print_cube_info) #callback names have to be unique return # here we define the physics callback to be called before each physics step, all physics callbacks must take # step_size as an argument def print_cube_info(self, step_size): position, orientation = self._cube.get_world_pose() linear_velocity = self._cube.get_linear_velocity() # will be shown on terminal print("Cube position is : " + str(position)) print("Cube's orientation is : " + str(orientation)) print("Cube's linear velocity is : " + str(linear_velocity)) # async def setup_pre_reset(self): # return # async def setup_post_reset(self): # return # def world_cleanup(self): # return
3,588
Python
40.732558
120
0.671126
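Once a light prim exists, it can be retuned later through the same UsdLux schema used to create it. A minimal sketch, assuming the distant light created in the example above is still at /World/distantLight and has not yet been given a rotate op:

import omni.usd
from pxr import UsdLux, Gf

stage = omni.usd.get_context().get_stage()
light = UsdLux.DistantLight.Get(stage, "/World/distantLight")

# Dim the light and tilt it; GetIntensityAttr() returns the attribute that
# CreateIntensityAttr() authored in the example above.
light.GetIntensityAttr().Set(500)
light.AddRotateXYZOp().Set(Gf.Vec3f(-36.0, 36.0, 0.0))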
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorFactoryDemo/garage_conveyor.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.isaac.core.prims.geometry_prim import GeometryPrim from omni.isaac.examples.base_sample import BaseSample # Note: checkout the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html from pxr import Sdf, UsdLux, Gf, UsdPhysics, PhysxSchema from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.rotations import euler_angles_to_quat import omni.isaac.core.utils.numpy.rotations as rot_utils from omni.isaac.core import SimulationContext from omni.isaac.sensor import Camera from .geom_utils import createRigidBody, addObjectsGeom from .inference_utils import triton_inference import omni.replicator.core as rep import omni.graph.core as og import numpy as np import random import carb import omni import cv2 PROPS = { 'spam' : "/Isaac/Props/YCB/Axis_Aligned/010_potted_meat_can.usd", 'jelly' : "/Isaac/Props/YCB/Axis_Aligned/009_gelatin_box.usd", 'tuna' : "/Isaac/Props/YCB/Axis_Aligned/007_tuna_fish_can.usd", 'cleanser' : "/Isaac/Props/YCB/Axis_Aligned/021_bleach_cleanser.usd", 'tomato_soup' : "/Isaac/Props/YCB/Axis_Aligned/005_tomato_soup_can.usd" } class GarageConveyor(BaseSample): def __init__(self) -> None: super().__init__() carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._nucleus_server = get_assets_root_path() self._bin_path = self._nucleus_server + "/Isaac/Props/KLT_Bin/small_KLT_visual.usd" self._bin_mass = 10.5 self._bin_scale = np.array([2.0, 2.0, 1.0]) self._bin_position = np.array([7.0, -0.2, 1.0]) self._plane_scale = np.array([0.4, 0.24, 1.0]) self._plane_position = np.array([-1.75, 1.2, 0.9]) self._plane_rotation = np.array([0.0, 0.0, 0.0]) self._gemini_usd_path = self._server_root + "/NVIDIA/Assets/Isaac/2023.1.1/Isaac/Sensors/Orbbec/Gemini 2/orbbec_gemini2_V1.0.usd" self._gemini_position = np.array([-1.75, 1.2, 1.5]) self._gemini_rotation = np.array([0.0, 0.7071068, -0.7071068, 0]) self._sim_count = 0 self._is_captured = False return def add_background(self): self._world = self.get_world() bg_path = self._server_root + "/Projects/RBROS2/ConveyorGarage/Franka_Garage_Empty.usd" add_reference_to_stage(usd_path=bg_path, prim_path=f"/World/Garage") def add_camera(self): self._camera = Camera( prim_path="/World/normal_camera", position=np.array([-1.75, 1.2, 2.0]), frequency=30, resolution=(1280, 720), orientation=rot_utils.euler_angles_to_quats( np.array([ 0, 90, 180 ]), degrees=True), ) self._camera.set_focal_length(2.0) self._camera.initialize() self._camera.add_motion_vectors_to_frame() def add_bin(self, scene): add_reference_to_stage(usd_path=self._bin_path, prim_path="/World/inference_bin") createRigidBody(self._stage, "/World/inference_bin", False) self._bin_ref_geom = addObjectsGeom( scene, "inference_bin", self._bin_scale, self._bin_position, 
self._bin_mass, orientation=None ) def add_random_objects(self, scene, num_objects=3): choicelist = [random.choice( list(PROPS.keys()) ) for i in range(num_objects)] for _object in choicelist: prim_path = self._nucleus_server + PROPS[_object] prim_name = f"{_object}_{random.randint(0, 100)}" add_reference_to_stage(usd_path=prim_path, prim_path=f"/World/{prim_name}") createRigidBody(self._stage, f"/World/{prim_name}") position = ( random.uniform(6.8, 7.05), random.uniform(-0.3, -0.1), random.uniform(1.1, 1.3) ) prim_geom = addObjectsGeom( scene, prim_name, np.array([1.0, 1.0, 1.0]), position, 0.02 ) def add_light(self): distantLight = UsdLux.CylinderLight.Define(self._stage, Sdf.Path("/World/cylinderLight")) distantLight.CreateIntensityAttr(60000) distantLight.AddTranslateOp().Set(Gf.Vec3f(-1.2, 0.9, 3.0)) distantLight.AddScaleOp().Set((0.1, 4.0, 0.1)) distantLight.AddRotateXYZOp().Set((0, 0, 90)) def setup_scene(self): self._world = self.get_world() self._stage = omni.usd.get_context().get_stage() self.simulation_context = SimulationContext() self.add_background() self.add_light() self.add_camera() self.add_bin(self._world.scene) self.add_random_objects(self._world.scene, num_objects=3) self._scene = PhysicsContext() self._scene.set_physics_dt(1 / 30.0) return async def setup_post_load(self): self._world = self.get_world() self._world.scene.enable_bounding_boxes_computations() self._world.add_physics_callback("sim_step", callback_fn=self.physics_callback) #callback names have to be unique self._cur_bin_position, _ = self._bin_ref_geom.get_world_pose() self._prev_bin_position = self._cur_bin_position return def physics_callback(self, step_size): self._cur_bin_position, _ = self._bin_ref_geom.get_world_pose() bin_vel = np.linalg.norm(self._cur_bin_position - self._prev_bin_position) if self._cur_bin_position[1] > 1.1 and bin_vel < 1e-5 and not self._is_captured: print("capture image...") self._is_captured = True self._camera.get_current_frame() cur_img = self._camera.get_rgba()[:, :, :3] triton_inference(cur_img) else: # print(f"bin_vel: {bin_vel} / self._is_captured : {self._is_captured} / self._cur_bin_position[1]: {self._cur_bin_position[1]}") pass self._sim_count += 1 self._prev_bin_position = self._cur_bin_position async def setup_post_reset(self): await self._world.play_async() return def world_cleanup(self): self._world.pause() return
7,012
Python
37.745856
141
0.628066
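The capture branch in physics_callback above only fires once the bin has reached the camera region and its frame-to-frame displacement settles below 1e-5. A small debug hook that would also dump the captured frame to disk is sketched below; the output path is an assumption, and the dtype handling is defensive since the RGBA buffer may be 8-bit or float depending on the render product.

import cv2
import numpy as np

def save_debug_frame(camera, path="/tmp/bin_capture.png"):
    # Drop the alpha channel, as in physics_callback, then convert RGB -> BGR for OpenCV.
    rgb = camera.get_rgba()[:, :, :3]
    if rgb.dtype != np.uint8:                       # defensive: normalize float frames to 8-bit
        rgb = (np.clip(rgb, 0.0, 1.0) * 255).astype(np.uint8)
    cv2.imwrite(path, cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))

Calling save_debug_frame(self._camera) right before triton_inference(cur_img) makes it easy to compare what the Triton model actually received with what the viewport shows.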
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorFactoryDemo/export/model_export.py
import os
import torch
import torchvision
import warnings

warnings.filterwarnings("ignore")

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# load the PyTorch model.
pytorch_dir = "/home/kimsooyoung/Documents/model.pth"
model = torch.load(pytorch_dir).cuda()

# switch to inference mode: a torchvision detection model only returns
# boxes/labels/scores (instead of training losses) when it is in eval mode
model.eval()

# Export Model
dummy_input = torch.rand(1, 3, 1024, 1024).cuda()
torch.onnx.export(
    model,
    dummy_input,
    "model.onnx",
    opset_version=11,
    input_names=["input"],
    output_names=["boxes", "labels", "scores"]
)
527
Python
20.999999
83
0.698292
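After the export, it is worth sanity-checking that the ONNX graph actually runs outside PyTorch. A minimal sketch, assuming onnxruntime is installed (it is not imported in the script above):

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
dummy = np.random.rand(1, 3, 1024, 1024).astype(np.float32)

# The output names match those passed to torch.onnx.export above.
boxes, labels, scores = session.run(["boxes", "labels", "scores"], {"input": dummy})
print(boxes.shape, labels.shape, scores.shape)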
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/FrankaNutsTable/nut_bolt_controller.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import typing import numpy as np from omni.isaac.core.controllers.base_controller import BaseController from omni.isaac.core.utils.rotations import euler_angles_to_quat, quat_to_euler_angles from omni.isaac.core.utils.stage import get_stage_units from omni.isaac.franka.controllers.rmpflow_controller import RMPFlowController from omni.isaac.franka.franka import Franka from omni.isaac.manipulators.controllers.pick_place_controller import PickPlaceController from .nut_vibra_table_controller import VibraFSM from .screw_controller import ScrewController class NutBoltController(BaseController): """ A state machine to tie nuts onto bolts with a vibrating table feeding the nuts - State 0: Pick and Place from pickup location on vibration table to different bolts - State 1: Screw nut onto bolt Args: name (str): Name id of the controller franka (Franka): Franka Robot """ def __init__(self, name: str, franka: Franka) -> None: BaseController.__init__(self, name=name) self._event = 0 self._franka = franka self._gripper = self._franka.gripper self._end_effector_initial_height = self._franka.get_world_pose()[0][2] + (0.4 / get_stage_units()) self._pause = False self._cspace_controller = RMPFlowController(name="pickplace_cspace_controller", robot_articulation=self._franka) pick_place_events_dt = [0.008, 0.005, 1, 0.1, 0.05, 0.01, 0.0025] self._pick_place_controller = PickPlaceController( name="pickplace_controller", cspace_controller=self._cspace_controller, gripper=self._gripper, end_effector_initial_height=self._end_effector_initial_height, events_dt=pick_place_events_dt, ) self._screw_controller = ScrewController( name=f"screw_controller", cspace_controller=self._cspace_controller, gripper=self._gripper ) self._vibraSM = VibraFSM() self._i = self._vibraSM._i self._vibraSM.stop_feed_after_delay(delay_sec=5.0) return def is_paused(self) -> bool: """ Returns: bool: True if the state machine is paused. Otherwise False. """ return self._pause def get_current_event(self) -> int: """ Returns: int: Current event/ phase of the state machine """ return self._event def forward( self, initial_picking_position: np.ndarray, bolt_top: np.ndarray, gripper_to_nut_offset: np.ndarray, x_offset: np.ndarray, ) -> np.ndarray: """Runs the controller one step. 
Args: initial_picking_position (np.ndarray): initial nut position at table feeder bolt_top (np.ndarray): bolt target position #""" _vibra_table_transforms = np.array([0.0, 0.0, 0.0]) if self.is_paused(): return _vibra_table_transforms offsetPos = self._vibraSM.update() _vibra_table_transforms = np.array(offsetPos, dtype=float) if self._vibraSM._state == "stop" and self._event == 0: initial_effector_orientation = quat_to_euler_angles(self._gripper.get_world_pose()[1]) initial_effector_orientation[2] = np.pi / 2 initial_end_effector_orientation = euler_angles_to_quat(initial_effector_orientation) actions = self._pick_place_controller.forward( picking_position=initial_picking_position + gripper_to_nut_offset, placing_position=bolt_top + np.array([x_offset, 0.0, 0.0]), current_joint_positions=self._franka.get_joint_positions(), end_effector_orientation=initial_end_effector_orientation, ) self._franka.apply_action(actions) if self._pick_place_controller.is_done(): self._vibraSM._set_delayed_state_change(delay_sec=1.0, nextState="backward") self._event = 1 if self._event == 1: actions2 = self._screw_controller.forward( franka_art_controller=self._franka.get_articulation_controller(), bolt_position=bolt_top, current_joint_positions=self._franka.get_joint_positions(), current_joint_velocities=self._franka.get_joint_velocities(), ) self._franka.apply_action(actions2) if self._screw_controller.is_paused(): self.pause() self._i += 1 return _vibra_table_transforms def reset(self, franka: Franka) -> None: """Resets the state machine to start from the first phase/ event Args: franka (Franka): Franka Robot """ BaseController.reset(self) self._event = 0 self._pause = False self._franka = franka self._gripper = self._franka.gripper self._end_effector_initial_height = self._franka.get_world_pose()[0][2] + (0.4 / get_stage_units()) self._pick_place_controller.reset(end_effector_initial_height=self._end_effector_initial_height) self._screw_controller.reset() return def pause(self) -> None: """Pauses the state machine's time and phase.""" self._pause = True return def resume(self) -> None: """Resumes the state machine's time and phase.""" self._pause = False return
5,896
Python
38.313333
120
0.633311
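In the controller above, forward() returns the velocity that the kinematic vibration table should be driven with on that step, while the pick-place/screw hand-off happens internally and is_paused() signals that the current nut is finished. A schematic per-step driver is sketched below with placeholder names; the real wiring lives in franka_nut_and_bolt.py further down.

import numpy as np
from pxr import Gf

def drive_nut_bolt(controller, franka, vibra_rb_api, pickup_pos, bolt_top):
    # Schematic driver for one physics step (placeholder wiring, not the example's loop).
    if controller.is_paused():
        controller.reset(franka)          # current nut finished; rearm for the next one
        return
    table_vel = controller.forward(
        initial_picking_position=pickup_pos,
        bolt_top=bolt_top,
        gripper_to_nut_offset=np.array([0.0, 0.0, 0.003]),
        x_offset=0.0,
    )
    # The vibration table is kinematic, so it is moved by writing a velocity every step.
    vibra_rb_api.CreateVelocityAttr().Set(Gf.Vec3f(*[float(v) for v in table_vel]))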
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/FrankaNutsTable/screw_controller.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import typing import numpy as np from omni.isaac.core.articulations import Articulation from omni.isaac.core.controllers.articulation_controller import ArticulationController from omni.isaac.core.controllers.base_controller import BaseController from omni.isaac.core.utils.rotations import euler_angles_to_quat, quat_to_euler_angles from omni.isaac.core.utils.stage import get_stage_units from omni.isaac.core.utils.types import ArticulationAction from omni.isaac.manipulators.grippers.gripper import Gripper class ScrewController(BaseController): """ A state machine for screwing nuts on bolts Each phase runs for 1 second, which is the internal time of the state machine Dt of each phase/ event step is defined - State 0: Lower end_effector down to encircle the nut - State 1: Close grip - State 2: Re-Center end-effector grip with that of the nut and bolt - State 3: Screw Clockwise - State 4: Open grip (initiates at this state and cycles until limit) - State 5: Screw counter-clockwise Args: name (str): Name id of the controller cspace_controller (BaseController): a cartesian space controller that returns an ArticulationAction type gripper (Gripper): a gripper controller for open/ close actions. events_dt (typing.Optional[typing.List[float]], optional): Dt of each phase/ event step. 10 phases dt has to be defined. Defaults to None. Raises: Exception: events dt need to be list or numpy array Exception: events dt need have length of 5 or less """ def __init__( self, name: str, cspace_controller: BaseController, gripper: Gripper, events_dt: typing.Optional[typing.List[float]] = None, ) -> None: BaseController.__init__(self, name=name) self._event = 4 self._t = 0 self._events_dt = events_dt if self._events_dt is None: self._events_dt = [0.01, 0.1, 0.1, 0.025, 0.1, 0.05] else: if not isinstance(self._events_dt, np.ndarray) and not isinstance(self._events_dt, list): raise Exception("events dt need to be list or numpy array") elif isinstance(self._events_dt, np.ndarray): self._events_dt = self._events_dt.tolist() if len(self._events_dt) > 5: raise Exception("events dt need have length of 5 or less") self._cspace_controller = cspace_controller self._gripper = gripper self._pause = False self._start = True self._screw_position = np.array([0.0, 0.0, 0.0]) self._final_position = np.array([0.0, 0.0, 0.0]) self._screw_speed = 360.0 / 180.0 * np.pi self._screw_speed_back = 720.0 / 180.0 * np.pi return def is_paused(self) -> bool: """ Returns: bool: True if the state machine is paused. Otherwise False. """ return self._pause def get_current_event(self) -> int: """ Returns: int: Current event/ phase of the state machine """ return self._event def forward( self, franka_art_controller: ArticulationController, bolt_position: np.ndarray, current_joint_positions: np.ndarray, current_joint_velocities: np.ndarray, ) -> ArticulationAction: """Runs the controller one step. Args: franka_art_controller (ArticulationController): Robot's Articulation Controller. bolt_position (np.ndarray): bolt position to reference for screwing position. 
current_joint_positions (np.ndarray): Current joint positions of the robot. current_joint_velocities (np.ndarray): Current joint velocities of the robot. Returns: ArticulationAction: action to be executed by the ArticulationController """ if self._pause or self._event >= len(self._events_dt): target_joints = [None] * current_joint_positions.shape[0] return ArticulationAction(joint_positions=target_joints) if self._event == 0 and self._start: self._screw_position = np.copy(bolt_position) self._final_position = np.copy(bolt_position) self._start = False self._target_end_effector_orientation = self._gripper.get_world_pose()[1] if self._event == 0: franka_art_controller.switch_dof_control_mode(dof_index=6, mode="position") orientation_quat = self._gripper.get_world_pose()[1] self.orientation_euler = quat_to_euler_angles(orientation_quat) target_orientation_euler = np.array([self.orientation_euler[0], self.orientation_euler[1], -np.pi / 2]) target_orientation_quat = euler_angles_to_quat(target_orientation_euler) target_joints = self._cspace_controller.forward( target_end_effector_position=self._screw_position, target_end_effector_orientation=target_orientation_quat, ) if self._event == 1: self._lower = False franka_art_controller.switch_dof_control_mode(dof_index=6, mode="position") target_joints = self._gripper.forward(action="close") if self._event == 2: franka_art_controller.switch_dof_control_mode(dof_index=6, mode="position") orientation_quat = self._gripper.get_world_pose()[1] self.orientation_euler = quat_to_euler_angles(orientation_quat) target_orientation_euler = np.array([self.orientation_euler[0], self.orientation_euler[1], -np.pi / 2]) target_orientation_quat = euler_angles_to_quat(target_orientation_euler) finger_pos = current_joint_positions[-2:] positive_x_offset = finger_pos[1] - finger_pos[0] target_joints = self._cspace_controller.forward( target_end_effector_position=self._screw_position + np.array([positive_x_offset, 0.0, -0.001]), target_end_effector_orientation=target_orientation_quat, ) if self._event == 3: franka_art_controller.switch_dof_control_mode(dof_index=6, mode="velocity") target_joint_velocities = [None] * current_joint_velocities.shape[0] target_joint_velocities[6] = self._screw_speed if current_joint_positions[6] > 2.7: target_joint_velocities[6] = 0.0 target_joints = ArticulationAction(joint_velocities=target_joint_velocities) if self._event == 4: franka_art_controller.switch_dof_control_mode(dof_index=6, mode="position") target_joints = self._gripper.forward(action="open") if self._event == 5: franka_art_controller.switch_dof_control_mode(dof_index=6, mode="velocity") target_joint_velocities = [None] * current_joint_velocities.shape[0] target_joint_velocities[6] = -self._screw_speed_back if current_joint_positions[6] < -0.4: target_joint_velocities[6] = 0.0 target_joints = ArticulationAction(joint_velocities=target_joint_velocities) self._t += self._events_dt[self._event] if self._t >= 1.0: self._event = (self._event + 1) % 6 self._t = 0 if self._event == 5: if not self._start and (bolt_position[2] - self._final_position[2] > 0.0198): self.pause() return ArticulationAction(joint_positions=[None] * current_joint_positions.shape[0]) if self._start: self._screw_position[2] -= 0.001 self._final_position[2] -= 0.001 if bolt_position[2] - self._screw_position[2] < 0.013: self._screw_position[2] -= 0.0018 self._final_position[2] -= 0.0018 return target_joints def reset(self, events_dt: typing.Optional[typing.List[float]] = None) -> None: """Resets the state machine 
to start from the first phase/ event Args: events_dt (typing.Optional[typing.List[float]], optional): Dt of each phase/ event step. Defaults to None. Raises: Exception: events dt need to be list or numpy array Exception: events dt need have length of 5 or less """ BaseController.reset(self) self._cspace_controller.reset() self._event = 4 self._t = 0 self._pause = False self._start = True self._screw_position = np.array([0.0, 0.0, 0.0]) self._final_position = np.array([0.0, 0.0, 0.0]) self._screw_speed = 360.0 / 180.0 * np.pi self._screw_speed_back = 720.0 / 180.0 * np.pi # self._gripper = gripper if events_dt is not None: self._events_dt = events_dt if not isinstance(self._events_dt, np.ndarray) and not isinstance(self._events_dt, list): raise Exception("events dt need to be list or numpy array") elif isinstance(self._events_dt, np.ndarray): self._events_dt = self._events_dt.tolist() if len(self._events_dt) > 5: raise Exception("events dt need have length of 5 or less") return def is_done(self) -> bool: """ Returns: bool: True if the state machine reached the last phase. Otherwise False. """ if self._event >= len(self._events_dt): return True else: return False def pause(self) -> None: """Pauses the state machine's time and phase.""" self._pause = True return def resume(self) -> None: """Resumes the state machine's time and phase.""" self._pause = False return

10,293
Python
42.434599
146
0.61605
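The phase timing in the controller above is easy to misread: every forward() call adds events_dt[phase] to an internal clock, and the phase advances when that clock crosses 1.0, so a phase lasts roughly 1/dt control steps. A dependency-free sketch of that bookkeeping, using the default dt values from the constructor (note the real controller starts at phase 4, the open-grip state):

# Phase-clock sketch matching the _t / _events_dt bookkeeping in ScrewController.
events_dt = [0.01, 0.1, 0.1, 0.025, 0.1, 0.05]   # default values from __init__
t, event, steps_per_phase = 0.0, 0, {}

for step in range(2000):
    steps_per_phase[event] = steps_per_phase.get(event, 0) + 1
    t += events_dt[event]
    if t >= 1.0:
        event = (event + 1) % 6                   # same wrap-around as the controller
        t = 0.0

# Phase 0 (lowering) runs ~100 steps, phase 3 (screwing) ~40 steps, and so on.
print(steps_per_phase)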
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/FrankaNutsTable/franka_nut_and_bolt.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import numpy as np from omni.isaac.core.materials.physics_material import PhysicsMaterial from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.isaac.core.prims.geometry_prim import GeometryPrim from omni.isaac.core.prims.xform_prim import XFormPrim from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core import SimulationContext from omni.isaac.franka.franka import Franka from omni.isaac.sensor import Camera import omni.isaac.core.utils.numpy.rotations as rot_utils import omni.usd import h5py from pxr import Gf, PhysxSchema, Usd, UsdPhysics, UsdShade, UsdGeom, Sdf, Tf, UsdLux from .nut_bolt_controller import NutBoltController # Note: checkout the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html class FrankaNutAndBolt(BaseSample): def __init__(self) -> None: super().__init__() # SCENE GEOMETRY # env (group) spacing: self._env_spacing = 2.0 # franka self._stool_height = 0.15 self._franka_position = np.array([0.269, 0.1778, 0.0]) # Gf.Vec3f(0.269, 0.1778, 0.0) # table and vibra table: self._table_position = np.array([0.5, 0.0, 0.0]) # Gf.Vec3f(0.5, 0.0, 0.0) self._table_scale = 0.01 self._tooling_plate_offset = np.array([0.0, 0.0, 0.0]) self._vibra_table_position_offset = np.array([0.157, -0.1524, 0.0]) self._vibra_top_offset = np.array([0.0, 0.0, 0.15]) self._vibra_table_top_to_collider_offset = np.array([0.05, 2.5, -0.59]) * 0.01 # xyz relative to the vibra table where the nut should be picked up self._vibra_table_nut_pickup_pos_offset = np.array([0.124, 0.24, 0.158]) # nut self._nut_height = 0.016 self._nut_spiral_center_vibra_offset = np.array([-0.04, -0.17, 0.01]) # randomize initial nut and bolt positions self._nut_radius = 0.055 self._nut_height_delta = 0.03 self._nut_dist_delta = 0.03 self._mass_nut = 0.065 # pipe and bolt parameters self._bolt_length = 0.1 self._bolt_radius = 0.11 self._pipe_pos_on_table = np.array([0.2032, 0.381, 0.0]) self._bolt_z_offset_to_pipe = 0.08 self._gripper_to_nut_offset = np.array([0.0, 0.0, 0.003]) self._top_of_bolt = ( np.array([0.0, 0.0, self._bolt_length + (self._nut_height / 2)]) + self._gripper_to_nut_offset ) # randomization self._randomize_nut_positions = True self._nut_position_noise_minmax = 0.005 self._rng_seed = 8 # states self._reset_hydra_instancing_on_shutdown = False self._time = 0.0 self._fsm_time = 0.0 # some global sim options: self._time_steps_per_second = 240 # 4.167ms aprx self._fsm_update_rate = 60 self._solverPositionIterations = 4 self._solverVelocityIterations = 1 self._solver_type = "TGS" self._ik_damping = 0.1 self._num_bolts = 6 self._num_nuts = 12 self._sim_dt = 1.0 / self._time_steps_per_second self._fsm_update_dt = 1.0 / self._fsm_update_rate self._sim_time_list = [] self._joint_positions = [] self._joint_velocities = [] self._camera1_img = [] self._camera2_img = [] self._camera3_img = [] return def setup_scene(self): # setup 
asset paths: self.nucleus_server = get_assets_root_path() self.asset_folder = self.nucleus_server + "/Isaac/Samples/Examples/FrankaNutBolt/" self.asset_paths = { "shop_table": self.asset_folder + "SubUSDs/Shop_Table/Shop_Table.usd", "tooling_plate": self.asset_folder + "SubUSDs/Tooling_Plate/Tooling_Plate.usd", "nut": self.asset_folder + "SubUSDs/Nut/M20_Nut_Tight_R256_Franka_SI.usd", "bolt": self.asset_folder + "SubUSDs/Bolt/M20_Bolt_Tight_R512_Franka_SI.usd", "vibra_table_top": self.asset_folder + "SubUSDs/VibrationTable_Top/VibrationTable_Top.usd", "vibra_table_bot": self.asset_folder + "SubUSDs/VibrationTable_Base/VibrationTable_Base.usd", "vibra_table_collision": self.asset_folder + "SubUSDs/VibrationTable_Top_collision.usd", "vibra_table_clamps": self.asset_folder + "SubUSDs/Clamps/Clamps.usd", "pipe": self.asset_folder + "SubUSDs/Pipe/Pipe.usd", } world = self.get_world() world.scene.add_default_ground_plane() stage = omni.usd.get_context().get_stage() self.simulation_context = SimulationContext() # Change Default SphereLight Intensity sphereLight = stage.GetPrimAtPath("/World/defaultGroundPlane/SphereLight") sphereLightIntensity = sphereLight.GetAttribute("intensity") sphereLightIntensity.Set(10000) # Add Distance Light distantLight = UsdLux.DistantLight.Define(stage, Sdf.Path("/World/distantLight")) distantLight.CreateIntensityAttr(300) # Add New Sphere Light new_sphereLight = UsdLux.SphereLight.Define(stage, Sdf.Path("/World/sphereLight")) new_sphereLight.CreateIntensityAttr(20000) new_sphereLight.AddTranslateOp().Set(Gf.Vec3f(3.0, 0.0, 2.5)) world.scene.add(XFormPrim(prim_path="/World/collisionGroups", name="collision_groups_xform")) self._setup_simulation() # add_table_assets add_reference_to_stage(usd_path=self.asset_paths["shop_table"], prim_path="/World/env/table") world.scene.add(GeometryPrim(prim_path="/World/env/table", name=f"table_ref_geom", collision=True)) add_reference_to_stage(usd_path=self.asset_paths["tooling_plate"], prim_path="/World/env/tooling_plate") world.scene.add(GeometryPrim(prim_path="/World/env/tooling_plate", name=f"tooling_plate_geom", collision=True)) add_reference_to_stage(usd_path=self.asset_paths["pipe"], prim_path="/World/env/pipe") world.scene.add(GeometryPrim(prim_path="/World/env/pipe", name=f"pipe_geom", collision=True)) # add_vibra_table_assets add_reference_to_stage(usd_path=self.asset_paths["vibra_table_bot"], prim_path="/World/env/vibra_table_bottom") world.scene.add(GeometryPrim(prim_path="/World/env/vibra_table_bottom", name=f"vibra_table_bottom_geom")) add_reference_to_stage( usd_path=self.asset_paths["vibra_table_clamps"], prim_path="/World/env/vibra_table_clamps" ) world.scene.add( GeometryPrim(prim_path="/World/env/vibra_table_clamps", name=f"vibra_table_clamps_geom", collision=True) ) world.scene.add(XFormPrim(prim_path="/World/env/vibra_table", name=f"vibra_table_xform")) add_reference_to_stage(usd_path=self.asset_paths["vibra_table_top"], prim_path="/World/env/vibra_table/visual") add_reference_to_stage( usd_path=self.asset_paths["vibra_table_collision"], prim_path="/World/env/vibra_table/collision" ) world.scene.add(XFormPrim(prim_path="/World/env/vibra_table/visual", name=f"vibra_table_visual_xform")) world.scene.add( GeometryPrim( prim_path="/World/env/vibra_table/collision", name=f"vibra_table_collision_ref_geom", collision=True ) ) # add_nuts_bolts_assets for bolt in range(self._num_bolts): add_reference_to_stage(usd_path=self.asset_paths["bolt"], prim_path=f"/World/env/bolt{bolt}") 
world.scene.add(GeometryPrim(prim_path=f"/World/env/bolt{bolt}", name=f"bolt{bolt}_geom")) for nut in range(self._num_nuts): add_reference_to_stage(usd_path=self.asset_paths["nut"], prim_path=f"/World/env/nut{nut}") world.scene.add(GeometryPrim(prim_path=f"/World/env/nut{nut}", name=f"nut{nut}_geom")) # add_franka_assets self._franka = world.scene.add(Franka(prim_path="/World/env/franka", name=f"franka")) self._camera1 = Camera( prim_path="/World/env/franka/panda_hand/hand_camera", # position=np.array([0.088, 0.0, 0.926]), translation=np.array([0.1, 0.0, -0.1]), frequency=30, resolution=(640, 480), orientation=rot_utils.euler_angles_to_quats( np.array([ 180, -90 - 25, 0 ]), degrees=True), ) self._camera1.set_clipping_range(0.1, 1000000.0) self._camera1.initialize() self._camera1.add_motion_vectors_to_frame() self._camera1.set_visibility(False) self._camera2 = Camera( prim_path="/World/top_camera", position=np.array([0.5, 0.0, 5.0]), frequency=30, resolution=(640, 480), orientation=rot_utils.euler_angles_to_quats( np.array([ 0, 90, 0 ]), degrees=True), ) self._camera2.initialize() self._camera2.set_visibility(True) # HDF Data Collection Setup self._save_count = 0 self._f = h5py.File('franka_bolts_nuts_table.hdf5','w') self._group_f = self._f.create_group("isaac_dataset") self._img_f = self._group_f.create_group("camera_images") return async def setup_post_load(self): self._world = self.get_world() self._rng = np.random.default_rng(self._rng_seed) self._world.scene.enable_bounding_boxes_computations() await self._setup_materials() # next four functions are for setting up the right positions and orientations for all assets await self._add_table() await self._add_vibra_table() await self._add_nuts_and_bolt(add_debug_nut=self._num_nuts == 2) await self._add_franka() self._controller = NutBoltController(name="nut_bolt_controller", franka=self._franka) self._franka.gripper.open() self._rbApi2 = UsdPhysics.RigidBodyAPI.Apply(self._vibra_table_xform.prim.GetPrim()) self._world.add_physics_callback(f"sim_step", callback_fn=self.physics_step) self._camera3 = Camera( prim_path="/World/front_camera", position=self._franka_position + np.array([1.0, 0.0, 0.3 + self._table_height]), frequency=30, resolution=(640, 480), orientation=rot_utils.euler_angles_to_quats( np.array([ 0, 0, 180 ]), degrees=True), ) self._camera3.set_clipping_range(0.1, 1000000.0) self._camera3.set_focal_length(1.0) self._camera3.initialize() self._camera3.set_visibility(False) await self._world.play_async() return def physics_step(self, step_size): self._camera1.get_current_frame() self._camera2.get_current_frame() self._camera3.get_current_frame() current_time = self.simulation_context.current_time current_joint_pos = self._franka.get_joint_positions() current_joint_vel = self._franka.get_joint_velocities() if self._save_count % 100 == 0: self._sim_time_list.append(current_time) self._joint_positions.append(current_joint_pos) self._joint_velocities.append(current_joint_vel) self._camera1_img.append(self._camera1.get_rgba()[:, :, :3]) self._camera2_img.append(self._camera2.get_rgba()[:, :, :3]) self._camera3_img.append(self._camera3.get_rgba()[:, :, :3]) print("Collecting data...") elif self._save_count > 2000: self.world_cleanup() self._save_count += 1 if self._controller.is_paused(): if self._controller._i >= min(self._num_nuts, self._num_bolts): self._rbApi2.CreateVelocityAttr().Set(Gf.Vec3f(0.0, 0.0, 0.0)) return self._controller.reset(self._franka) if self._controller._i < min(self._num_nuts, self._num_bolts): initial_position 
= self._vibra_table_nut_pickup_pos_offset + self._vibra_table_position self._bolt_geom = self._world.scene.get_object(f"bolt{self._controller._i}_geom") finger_pos = self._franka.get_joint_positions()[-2:] positive_x_offset = finger_pos[1] - finger_pos[0] bolt_position, _ = self._bolt_geom.get_world_pose() placing_position = bolt_position + self._top_of_bolt _vibra_table_transforms = self._controller.forward( initial_picking_position=initial_position, bolt_top=placing_position, gripper_to_nut_offset=self._gripper_to_nut_offset, x_offset=positive_x_offset, ) self._rbApi2.CreateVelocityAttr().Set( Gf.Vec3f(_vibra_table_transforms[0], _vibra_table_transforms[1], _vibra_table_transforms[2]) ) return async def _setup_materials(self): self._bolt_physics_material = PhysicsMaterial( prim_path="/World/PhysicsMaterials/BoltMaterial", name="bolt_material_physics", static_friction=0.2, dynamic_friction=0.2, ) self._nut_physics_material = PhysicsMaterial( prim_path="/World/PhysicsMaterials/NutMaterial", name="nut_material_physics", static_friction=0.2, dynamic_friction=0.2, ) self._vibra_table_physics_material = PhysicsMaterial( prim_path="/World/PhysicsMaterials/VibraTableMaterial", name="vibra_table_material_physics", static_friction=0.2, dynamic_friction=0.2, ) self._franka_finger_physics_material = PhysicsMaterial( prim_path="/World/PhysicsMaterials/FrankaFingerMaterial", name="franka_finger_material_physics", static_friction=0.7, dynamic_friction=0.7, ) await self._world.reset_async() async def _add_table(self): ##shop_table self._table_ref_geom = self._world.scene.get_object(f"table_ref_geom") self._table_ref_geom.set_local_scale(np.array([self._table_scale])) self._table_ref_geom.set_world_pose(position=self._table_position) self._table_ref_geom.set_default_state(position=self._table_position) lb = self._world.scene.compute_object_AABB(name=f"table_ref_geom") zmin = lb[0][2] zmax = lb[1][2] self._table_position[2] = -zmin self._table_height = zmax self._table_ref_geom.set_collision_approximation("none") self._convexIncludeRel.AddTarget(self._table_ref_geom.prim_path) ##tooling_plate self._tooling_plate_geom = self._world.scene.get_object(f"tooling_plate_geom") self._tooling_plate_geom.set_local_scale(np.array([self._table_scale])) lb = self._world.scene.compute_object_AABB(name=f"tooling_plate_geom") zmin = lb[0][2] zmax = lb[1][2] tooling_transform = self._tooling_plate_offset tooling_tranfsorm[2] = -zmin + self._table_height tooling_transform = tooling_transform + self._table_position self._tooling_plate_geom.set_world_pose(position=tooling_transform) self._tooling_plate_geom.set_default_state(position=tooling_transform) self._tooling_plate_geom.set_collision_approximation("boundingCube") self._table_height += zmax - zmin self._convexIncludeRel.AddTarget(self._tooling_plate_geom.prim_path) ##pipe self._pipe_geom = self._world.scene.get_object(f"pipe_geom") self._pipe_geom.set_local_scale(np.array([self._table_scale])) lb = self._world.scene.compute_object_AABB(name=f"pipe_geom") zmin = lb[0][2] zmax = lb[1][2] self._pipe_height = zmax - zmin pipe_transform = self._pipe_pos_on_table pipe_transform[2] = -zmin + self._table_height pipe_transform = pipe_transform + self._table_position self._pipe_geom.set_world_pose(position=pipe_transform, orientation=np.array([0, 0, 0, 1])) self._pipe_geom.set_default_state(position=pipe_transform, orientation=np.array([0, 0, 0, 1])) self._pipe_geom.set_collision_approximation("none") self._convexIncludeRel.AddTarget(self._pipe_geom.prim_path) await 
self._world.reset_async() async def _add_vibra_table(self): self._vibra_table_bottom_geom = self._world.scene.get_object(f"vibra_table_bottom_geom") self._vibra_table_bottom_geom.set_local_scale(np.array([self._table_scale])) lb = self._world.scene.compute_object_AABB(name=f"vibra_table_bottom_geom") zmin = lb[0][2] bot_part_pos = self._vibra_table_position_offset bot_part_pos[2] = -zmin + self._table_height bot_part_pos = bot_part_pos + self._table_position self._vibra_table_bottom_geom.set_world_pose(position=bot_part_pos) self._vibra_table_bottom_geom.set_default_state(position=bot_part_pos) self._vibra_table_bottom_geom.set_collision_approximation("none") self._convexIncludeRel.AddTarget(self._vibra_table_bottom_geom.prim_path) # clamps self._vibra_table_clamps_geom = self._world.scene.get_object(f"vibra_table_clamps_geom") self._vibra_table_clamps_geom.set_collision_approximation("none") self._convexIncludeRel.AddTarget(self._vibra_table_clamps_geom.prim_path) # vibra_table self._vibra_table_xform = self._world.scene.get_object(f"vibra_table_xform") self._vibra_table_position = bot_part_pos vibra_kinematic_prim = self._vibra_table_xform.prim rbApi = UsdPhysics.RigidBodyAPI.Apply(vibra_kinematic_prim.GetPrim()) rbApi.CreateRigidBodyEnabledAttr(True) rbApi.CreateKinematicEnabledAttr(True) # visual self._vibra_table_visual_xform = self._world.scene.get_object(f"vibra_table_visual_xform") self._vibra_table_visual_xform.set_world_pose(position=self._vibra_top_offset) self._vibra_table_visual_xform.set_default_state(position=self._vibra_top_offset) self._vibra_table_visual_xform.set_local_scale(np.array([self._table_scale])) # not clear why this makes a difference for the position (new bug although no change to code) self._vibra_table_visual_xform.prim.SetInstanceable(True) # collision self._vibra_table_collision_ref_geom = self._world.scene.get_object(f"vibra_table_collision_ref_geom") offset = self._vibra_top_offset + self._vibra_table_top_to_collider_offset self._vibra_table_collision_ref_geom.set_local_scale(np.array([1.0])) self._vibra_table_collision_ref_geom.set_world_pose(position=offset) self._vibra_table_collision_ref_geom.set_default_state(position=offset) self._vibra_table_collision_ref_geom.apply_physics_material(self._vibra_table_physics_material) self._convexIncludeRel.AddTarget(self._vibra_table_collision_ref_geom.prim_path) self._vibra_table_collision_ref_geom.set_collision_approximation("convexHull") vibra_kinematic_prim.SetInstanceable(True) self._vibra_table_xform.set_world_pose(position=self._vibra_table_position, orientation=np.array([0, 0, 0, 1])) self._vibra_table_xform.set_default_state( position=self._vibra_table_position, orientation=np.array([0, 0, 0, 1]) ) self._vibra_table_visual_xform.set_default_state( position=self._vibra_table_visual_xform.get_world_pose()[0], orientation=self._vibra_table_visual_xform.get_world_pose()[1], ) self._vibra_table_collision_ref_geom.set_default_state( position=self._vibra_table_collision_ref_geom.get_world_pose()[0], orientation=self._vibra_table_collision_ref_geom.get_world_pose()[1], ) await self._world.reset_async() async def _add_nuts_and_bolt(self, add_debug_nut=False): angle_delta = np.pi * 2.0 / self._num_bolts for j in range(self._num_bolts): self._bolt_geom = self._world.scene.get_object(f"bolt{j}_geom") self._bolt_geom.prim.SetInstanceable(True) bolt_pos = np.array(self._pipe_pos_on_table) + self._table_position bolt_pos[0] += np.cos(j * angle_delta) * self._bolt_radius bolt_pos[1] += np.sin(j * angle_delta) * 
self._bolt_radius bolt_pos[2] = self._bolt_z_offset_to_pipe + self._table_height self._bolt_geom.set_world_pose(position=bolt_pos) self._bolt_geom.set_default_state(position=bolt_pos) self._boltMeshIncludeRel.AddTarget(self._bolt_geom.prim_path) self._bolt_geom.apply_physics_material(self._bolt_physics_material) await self._generate_nut_initial_poses() for nut_idx in range(self._num_nuts): nut_pos = self._nut_init_poses[nut_idx, :3].copy() if add_debug_nut and nut_idx == 0: nut_pos[0] = 0.78 nut_pos[1] = self._vibra_table_nut_pickup_pos_offset[1] + self._vibra_table_position[1] # 0.0264 if add_debug_nut and nut_idx == 1: nut_pos[0] = 0.78 nut_pos[1] = 0.0264 - 0.04 self._nut_geom = self._world.scene.get_object(f"nut{nut_idx}_geom") self._nut_geom.prim.SetInstanceable(True) self._nut_geom.set_world_pose(position=np.array(nut_pos.tolist())) self._nut_geom.set_default_state(position=np.array(nut_pos.tolist())) physxRBAPI = PhysxSchema.PhysxRigidBodyAPI.Apply(self._nut_geom.prim) physxRBAPI.CreateSolverPositionIterationCountAttr().Set(self._solverPositionIterations) physxRBAPI.CreateSolverVelocityIterationCountAttr().Set(self._solverVelocityIterations) self._nut_geom.apply_physics_material(self._nut_physics_material) self._convexIncludeRel.AddTarget(self._nut_geom.prim_path + "/M20_Nut_Tight_Convex") self._nutMeshIncludeRel.AddTarget(self._nut_geom.prim_path + "/M20_Nut_Tight_SDF") rbApi3 = UsdPhysics.RigidBodyAPI.Apply(self._nut_geom.prim.GetPrim()) rbApi3.CreateRigidBodyEnabledAttr(True) physxAPI = PhysxSchema.PhysxRigidBodyAPI.Apply(self._nut_geom.prim.GetPrim()) physxAPI.CreateSleepThresholdAttr().Set(0.0) massAPI = UsdPhysics.MassAPI.Apply(self._nut_geom.prim.GetPrim()) massAPI.CreateMassAttr().Set(self._mass_nut) await self._world.reset_async() async def _generate_nut_initial_poses(self): self._nut_init_poses = np.zeros((self._num_nuts, 7), dtype=np.float32) self._nut_init_poses[:, -1] = 1 # quat to identity nut_spiral_center = self._vibra_table_position + self._nut_spiral_center_vibra_offset nut_spiral_center += self._vibra_top_offset for nut_idx in range(self._num_nuts): self._nut_init_poses[nut_idx, :3] = np.array(nut_spiral_center) self._nut_init_poses[nut_idx, 0] += self._nut_radius * np.sin( np.pi / 3.0 * nut_idx ) + self._nut_dist_delta * (nut_idx // 6) self._nut_init_poses[nut_idx, 1] += self._nut_radius * np.cos( np.pi / 3.0 * nut_idx ) + self._nut_dist_delta * (nut_idx // 6) self._nut_init_poses[nut_idx, 2] += self._nut_height_delta * (nut_idx // 6) if self._randomize_nut_positions: self._nut_init_poses[nut_idx, 0] += self._rng.uniform( -self._nut_position_noise_minmax, self._nut_position_noise_minmax ) self._nut_init_poses[nut_idx, 1] += self._rng.uniform( -self._nut_position_noise_minmax, self._nut_position_noise_minmax ) await self._world.reset_async() async def _add_franka(self): self._franka = self._world.scene.get_object(f"franka") franka_pos = np.array(self._franka_position) franka_pos[2] = franka_pos[2] + self._table_height self._franka.set_world_pose(position=franka_pos) self._franka.set_default_state(position=franka_pos) self._franka.gripper.open() kps = np.array([6000000.0, 600000.0, 6000000.0, 600000.0, 25000.0, 15000.0, 25000.0, 15000.0, 15000.0]) kds = np.array([600000.0, 60000.0, 300000.0, 30000.0, 3000.0, 3000.0, 3000.0, 6000.0, 6000.0]) self._franka.get_articulation_controller().set_gains(kps=kps, kds=kds, save_to_usd=True) self._frankaHandIncludeRel.AddTarget(self._franka.prim_path + "/panda_leftfinger") 
self._frankaHandIncludeRel.AddTarget(self._franka.prim_path + "/panda_rightfinger") franka_left_finger = self._world.stage.GetPrimAtPath( "/World/env/franka/panda_leftfinger/geometry/panda_leftfinger" ) x = UsdShade.MaterialBindingAPI.Apply(franka_left_finger) x.Bind( self._franka_finger_physics_material.material, bindingStrength="weakerThanDescendants", materialPurpose="physics", ) franka_right_finger = self._world.stage.GetPrimAtPath( "/World/env/franka/panda_rightfinger/geometry/panda_rightfinger" ) x2 = UsdShade.MaterialBindingAPI.Apply(franka_right_finger) x2.Bind( self._franka_finger_physics_material.material, bindingStrength="weakerThanDescendants", materialPurpose="physics", ) await self._world.reset_async() def _setup_simulation(self): self._scene = PhysicsContext() self._scene.set_solver_type(self._solver_type) self._scene.set_broadphase_type("GPU") self._scene.enable_gpu_dynamics(flag=True) self._scene.set_friction_offset_threshold(0.01) self._scene.set_friction_correlation_distance(0.0005) self._scene.set_gpu_total_aggregate_pairs_capacity(10 * 1024) self._scene.set_gpu_found_lost_pairs_capacity(10 * 1024) self._scene.set_gpu_heap_capacity(64 * 1024 * 1024) self._scene.set_gpu_found_lost_aggregate_pairs_capacity(10 * 1024) # added because of new errors regarding collisionstacksize physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(get_prim_at_path("/physicsScene")) physxSceneAPI.CreateGpuCollisionStackSizeAttr().Set(76000000) # or whatever min is needed # group to include SDF mesh of nut only self._meshCollisionGroup = UsdPhysics.CollisionGroup.Define( self._world.scene.stage, "/World/collisionGroups/meshColliders" ) collectionAPI = Usd.CollectionAPI.Apply(self._meshCollisionGroup.GetPrim(), "colliders") self._nutMeshIncludeRel = collectionAPI.CreateIncludesRel() # group to include all convex collision (nut convex, pipe, table, vibrating table, other small assets on the table) self._convexCollisionGroup = UsdPhysics.CollisionGroup.Define( self._world.scene.stage, "/World/collisionGroups/convexColliders" ) collectionAPI = Usd.CollectionAPI.Apply(self._convexCollisionGroup.GetPrim(), "colliders") self._convexIncludeRel = collectionAPI.CreateIncludesRel() # group to include bolt prim only (only has SDF mesh) self._boltCollisionGroup = UsdPhysics.CollisionGroup.Define( self._world.scene.stage, "/World/collisionGroups/boltColliders" ) collectionAPI = Usd.CollectionAPI.Apply(self._boltCollisionGroup.GetPrim(), "colliders") self._boltMeshIncludeRel = collectionAPI.CreateIncludesRel() # group to include the franka hands prims only self._frankaHandCollisionGroup = UsdPhysics.CollisionGroup.Define( self._world.scene.stage, "/World/collisionGroups/frankaHandColliders" ) collectionAPI = Usd.CollectionAPI.Apply(self._frankaHandCollisionGroup.GetPrim(), "colliders") self._frankaHandIncludeRel = collectionAPI.CreateIncludesRel() # invert group logic so only groups that filter each-other will collide: self._scene.set_invert_collision_group_filter(True) # # the SDF mesh collider nuts should only collide with the bolts filteredRel = self._meshCollisionGroup.CreateFilteredGroupsRel() filteredRel.AddTarget("/World/collisionGroups/boltColliders") # # the convex hull nuts should collide with other nuts, the vibra table, table, pipe and small assets on the table. 
# It should also collide with the franka grippers filteredRel = self._convexCollisionGroup.CreateFilteredGroupsRel() filteredRel.AddTarget("/World/collisionGroups/convexColliders") filteredRel.AddTarget("/World/collisionGroups/frankaHandColliders") # # the SDF mesh bolt only collides with the SDF mesh nut colliders # and with the franka grippers filteredRel = self._boltCollisionGroup.CreateFilteredGroupsRel() filteredRel.AddTarget("/World/collisionGroups/meshColliders") filteredRel.AddTarget("/World/collisionGroups/frankaHandColliders") async def setup_pre_reset(self): return async def setup_post_reset(self): self._controller._vibraSM.reset() self._controller._vibraSM._i = 2 self._controller.reset(franka=self._franka) self._controller._i = self._controller._vibraSM._i self._franka.gripper.open() self._controller._vibraSM.start_feed() await self._world.play_async() return def world_cleanup(self): self._controller = None self._group_f.create_dataset(f"sim_time", data=self._sim_time_list, compression='gzip', compression_opts=9) self._group_f.create_dataset(f"joint_positions", data=self._joint_positions, compression='gzip', compression_opts=9) self._group_f.create_dataset(f"joint_velocities", data=self._joint_velocities, compression='gzip', compression_opts=9) self._img_f.create_dataset(f"hand_camera", data=self._camera1_img, compression='gzip', compression_opts=9) self._img_f.create_dataset(f"top_camera", data=self._camera2_img, compression='gzip', compression_opts=9) self._img_f.create_dataset(f"front_camera", data=self._camera3_img, compression='gzip', compression_opts=9) self._f.close() print("Data saved") self._save_count = 0 self._world.pause() return
31,138
Python
48.115142
126
0.638898
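The scenario file above keeps the SDF nut meshes, the bolts, the convex props, and the Franka fingers in separate USD collision groups and inverts the group filter so that only explicitly paired groups collide. Below is a condensed, hypothetical sketch of that pattern using only the UsdPhysics / Usd.CollectionAPI calls that appear in _setup_simulation; `stage` and `physics_context` stand in for `self._world.scene.stage` and the PhysicsContext created there, and the prim paths are placeholders.

# Hypothetical sketch of the inverted collision-group filtering used in _setup_simulation above.
from pxr import Usd, UsdPhysics

def make_collision_group(stage, path):
    # Group membership is expressed through an includes-relationship on a "colliders" collection.
    group = UsdPhysics.CollisionGroup.Define(stage, path)
    collection = Usd.CollectionAPI.Apply(group.GetPrim(), "colliders")
    return group, collection.CreateIncludesRel()

def setup_nut_bolt_groups(stage, physics_context):
    nut_group, nut_members = make_collision_group(stage, "/World/collisionGroups/meshColliders")
    bolt_group, bolt_members = make_collision_group(stage, "/World/collisionGroups/boltColliders")

    # Invert the filter logic: only groups that target each other will collide at all.
    physics_context.set_invert_collision_group_filter(True)
    nut_group.CreateFilteredGroupsRel().AddTarget("/World/collisionGroups/boltColliders")
    bolt_group.CreateFilteredGroupsRel().AddTarget("/World/collisionGroups/meshColliders")

    # Callers then register member prims, e.g. nut_members.AddTarget(nut_prim_path + "/M20_Nut_Tight_SDF").
    return nut_members, bolt_members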
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/FrankaNutsTable/nut_vibra_table_controller.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import numpy as np class VibraFSM: _amplitudes = { "stop": np.array((0.0, 0.0, 0.0), dtype=np.float32), # [m] "run_feed": np.array((0.000, 0.03, 0.02), dtype=np.float32), # [m] "backward": np.array((0.000, -0.03, 0.02), dtype=np.float32), # [m] "realign": np.array((-0.03, 0.0, 0.02), dtype=np.float32), # [m] } _motion_frequency = 60.0 # [Hz] # configure unblock-cycle: _feed_time = 3.5 _stop_time = 5.0 _backward_time = 0.75 _realign_time = 0.75 def __init__(self, dt=None): self.reset() self._i = 2 if dt is not None: self._dt = dt def reset(self): self._dt = 1.0 / 240.0 self._time = 0.0 self.state = "stop" self._after_delay_state = None def start_feed(self): self.state = "run_feed" # kick off unblock cycle self._set_delayed_state_change(delay_sec=self._feed_time, nextState="backward") def stop_feed_after_delay(self, delay_sec: float): self.state = "run_feed" self._set_delayed_state_change(delay_sec=delay_sec, nextState="stop") def _set_delayed_state_change(self, delay_sec: float, nextState: str): self._after_delay_state = nextState self._wait_end_time = self._time + delay_sec def update(self): self._time += self._dt # process wait if necessary if self._after_delay_state is not None and self._time > self._wait_end_time: self.state = self._after_delay_state # auto-unblock cycle if self._state == "run_feed": self.stop_feed_after_delay(self._stop_time) elif self._state == "backward": self._set_delayed_state_change(delay_sec=self._backward_time, nextState="realign") elif self._state == "realign": self._set_delayed_state_change(delay_sec=self._realign_time, nextState="run_feed") else: self._after_delay_state = None return self._motion_amplitude def is_stopped(self): return self._state == "stop" def is_stopping(self): return self.is_stopped() or self._after_delay_state == "stop" @property def state(self): return self._state @state.setter def state(self, newState): self._state = newState if self._state in self._amplitudes: self._motion_amplitude = self._amplitudes[self._state] else: self._motion_amplitude = self._amplitudes["stop"]
2,983
Python
34.105882
98
0.601408
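A minimal, hypothetical driving loop for the VibraFSM class above, assuming its default 240 Hz step; the loop length and printout are illustrative only. Each update() call advances the internal timer, runs the feed / backward / realign unblock cycle, and returns the per-axis motion amplitude for the current state.

# Hypothetical usage sketch for VibraFSM (not part of the original file).
fsm = VibraFSM(dt=1.0 / 240.0)
fsm.start_feed()                              # enter "run_feed" and schedule the unblock cycle

for step in range(240 * 10):                  # ten simulated seconds of fixed-step updates
    amplitude = fsm.update()                  # np.ndarray of per-axis amplitudes [m]
    if step % 240 == 0:
        print(f"t={step / 240.0:4.1f}s  state={fsm.state:<9}  amplitude={amplitude}")

fsm.stop_feed_after_delay(1.0)                # request a stop one second from now
assert fsm.is_stopping()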
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/LimoAckermannROS2/limo_ackermann.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from pxr import UsdGeom, Gf, UsdPhysics, Sdf, Gf, Tf, UsdLux from omni.physx.scripts import deformableUtils, physicsUtils import omni.graph.core as og import numpy as np import usdrt.Sdf import omni import carb class LimoAckermann(BaseSample): def __init__(self) -> None: super().__init__() carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self.TRACK_PATH = self._server_root + "/Projects/WegoLimo/LimoTrack/LIMO_simulation_table.usd" # self.ROBOT_PATH = self._server_root + "/Projects/RBROS2/WheeledRobot/limo_diff_thin.usd" self.ROBOT_PATH = self._server_root + "/Projects/WegoLimo/Limo/limo_ackermann.usd" # omniverse://localhost/Projects/WegoLimo/Limo/limo_ackermann.usd self._domain_id = 30 self._maxWheelRotation = 1e6 self._maxWheelVelocity = 1e6 self._trackWidth = 0.13 self._turningWheelRadius = 0.045 self._wheelBase = 0.2 self._targetPrim = "/World/Limo/base_link" self._robotPath = "/World/Limo/base_link" self._cameraPath = "/World/Limo/depth_link/rgb_camera" return def og_setup(self): try: og.Controller.edit( {"graph_path": "/ROS2Ackermann", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("subscribeTwist", "omni.isaac.ros2_bridge.ROS2SubscribeTwist"), ("scaleToFromStage", "omni.isaac.core_nodes.OgnIsaacScaleToFromStageUnit"), ("angVelBreak", "omni.graph.nodes.BreakVector3"), ("linVelBreak", "omni.graph.nodes.BreakVector3"), ("wheelbase", "omni.graph.nodes.ConstantDouble"), ("multiply", "omni.graph.nodes.Multiply"), ("atan2", "omni.graph.nodes.ATan2"), ("toRad", "omni.graph.nodes.ToRad"), ("ackermannCtrl", "omni.isaac.wheeled_robots.AckermannSteering"), ("wheelJointNames", "omni.graph.nodes.ConstructArray"), ("wheelRotationVel", "omni.graph.nodes.ConstructArray"), ("hingeJointNames", "omni.graph.nodes.ConstructArray"), ("hingePosVel", "omni.graph.nodes.ConstructArray"), ("articulationRotation", "omni.isaac.core_nodes.IsaacArticulationController"), ("articulationPosition", "omni.isaac.core_nodes.IsaacArticulationController"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", self._domain_id), ("subscribeTwist.inputs:topicName", "cmd_vel"), ("wheelbase.inputs:value", self._wheelBase), ("ackermannCtrl.inputs:maxWheelRotation", self._maxWheelRotation), ("ackermannCtrl.inputs:maxWheelVelocity", self._maxWheelVelocity), ("ackermannCtrl.inputs:trackWidth", self._trackWidth), ("ackermannCtrl.inputs:turningWheelRadius", self._turningWheelRadius), ("ackermannCtrl.inputs:useAcceleration", False), ("wheelJointNames.inputs:arraySize", 4), ("wheelJointNames.inputs:arrayType", "token[]"), ("wheelJointNames.inputs:input0", "rear_left_wheel"), ("wheelJointNames.inputs:input1", 
"rear_right_wheel"), ("wheelJointNames.inputs:input2", "front_left_wheel"), ("wheelJointNames.inputs:input3", "front_right_wheel"), ("hingeJointNames.inputs:arraySize", 2), ("hingeJointNames.inputs:arrayType", "token[]"), ("hingeJointNames.inputs:input0", "left_steering_hinge_wheel"), ("hingeJointNames.inputs:input1", "right_steering_hinge_wheel"), ("wheelRotationVel.inputs:arraySize", 4), ("wheelRotationVel.inputs:arrayType", "double[]"), ("hingePosVel.inputs:arraySize", 2), ("hingePosVel.inputs:arrayType", "double[]"), ("articulationRotation.inputs:targetPrim", [usdrt.Sdf.Path(self._targetPrim)]), ("articulationRotation.inputs:robotPath", self._targetPrim), ("articulationRotation.inputs:usePath", False), ("articulationPosition.inputs:targetPrim", [usdrt.Sdf.Path(self._targetPrim)]), ("articulationPosition.inputs:robotPath", self._targetPrim), ("articulationPosition.inputs:usePath", False), ], og.Controller.Keys.CREATE_ATTRIBUTES: [ ("wheelJointNames.inputs:input1", "token"), ("wheelJointNames.inputs:input2", "token"), ("wheelJointNames.inputs:input3", "token"), ("hingeJointNames.inputs:input1", "token"), ("wheelRotationVel.inputs:input1", "double"), ("wheelRotationVel.inputs:input2", "double"), ("wheelRotationVel.inputs:input3", "double"), ("hingePosVel.inputs:input1", "double"), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "subscribeTwist.inputs:execIn"), ("context.outputs:context", "subscribeTwist.inputs:context"), ("subscribeTwist.outputs:linearVelocity", "scaleToFromStage.inputs:value"), ("scaleToFromStage.outputs:result", "linVelBreak.inputs:tuple"), ("subscribeTwist.outputs:angularVelocity", "angVelBreak.inputs:tuple"), ("subscribeTwist.outputs:execOut", "ackermannCtrl.inputs:execIn"), ("angVelBreak.outputs:z", "multiply.inputs:a"), ("linVelBreak.outputs:x", "ackermannCtrl.inputs:speed"), ("wheelbase.inputs:value", "multiply.inputs:b"), ("wheelbase.inputs:value", "ackermannCtrl.inputs:wheelBase"), ("multiply.outputs:product", "atan2.inputs:a"), ("linVelBreak.outputs:x", "atan2.inputs:b"), ("atan2.outputs:result", "toRad.inputs:degrees"), ("toRad.outputs:radians", "ackermannCtrl.inputs:steeringAngle"), ("ackermannCtrl.outputs:leftWheelAngle", "hingePosVel.inputs:input0"), ("ackermannCtrl.outputs:rightWheelAngle", "hingePosVel.inputs:input1"), ("ackermannCtrl.outputs:wheelRotationVelocity", "wheelRotationVel.inputs:input0"), ("ackermannCtrl.outputs:wheelRotationVelocity", "wheelRotationVel.inputs:input1"), ("ackermannCtrl.outputs:wheelRotationVelocity", "wheelRotationVel.inputs:input2"), ("ackermannCtrl.outputs:wheelRotationVelocity", "wheelRotationVel.inputs:input3"), ("ackermannCtrl.outputs:execOut", "articulationRotation.inputs:execIn"), ("wheelJointNames.outputs:array", "articulationRotation.inputs:jointNames"), ("wheelRotationVel.outputs:array", "articulationRotation.inputs:velocityCommand"), ("ackermannCtrl.outputs:execOut", "articulationPosition.inputs:execIn"), ("hingeJointNames.outputs:array", "articulationPosition.inputs:jointNames"), ("hingePosVel.outputs:array", "articulationPosition.inputs:positionCommand"), ], }, ) og.Controller.edit( {"graph_path": "/ROS2Odom", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("computeOdom", "omni.isaac.core_nodes.IsaacComputeOdometry"), ("publishOdom", "omni.isaac.ros2_bridge.ROS2PublishOdometry"), ("publishRawTF", 
"omni.isaac.ros2_bridge.ROS2PublishRawTransformTree"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", self._domain_id), ("computeOdom.inputs:chassisPrim", [usdrt.Sdf.Path(self._targetPrim)]), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "computeOdom.inputs:execIn"), ("onPlaybackTick.outputs:tick", "publishOdom.inputs:execIn"), ("onPlaybackTick.outputs:tick", "publishRawTF.inputs:execIn"), ("readSimTime.outputs:simulationTime", "publishOdom.inputs:timeStamp"), ("readSimTime.outputs:simulationTime", "publishRawTF.inputs:timeStamp"), ("context.outputs:context", "publishOdom.inputs:context"), ("context.outputs:context", "publishRawTF.inputs:context"), ("computeOdom.outputs:angularVelocity", "publishOdom.inputs:angularVelocity"), ("computeOdom.outputs:linearVelocity", "publishOdom.inputs:linearVelocity"), ("computeOdom.outputs:orientation", "publishOdom.inputs:orientation"), ("computeOdom.outputs:position", "publishOdom.inputs:position"), ("computeOdom.outputs:orientation", "publishRawTF.inputs:rotation"), ("computeOdom.outputs:position", "publishRawTF.inputs:translation"), ], }, ) # Camera OG og.Controller.edit( {"graph_path": "/ROS2Camera", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("renderer", "omni.isaac.core_nodes.IsaacCreateRenderProduct"), ("RGBPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ("DepthPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ("CameraInfoPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", self._domain_id), ("renderer.inputs:cameraPrim", [usdrt.Sdf.Path(self._cameraPath)]), ("RGBPublish.inputs:topicName", "/limo/rgb"), ("RGBPublish.inputs:type", "rgb"), ("RGBPublish.inputs:resetSimulationTimeOnStop", True), ("RGBPublish.inputs:frameId", "limo_rgbd_frame"), ("DepthPublish.inputs:topicName", "/limo/depth"), ("DepthPublish.inputs:type", "depth"), ("DepthPublish.inputs:resetSimulationTimeOnStop", True), ("DepthPublish.inputs:frameId", "limo_rgbd_frame"), ("CameraInfoPublish.inputs:topicName", "/limo/camera_info"), ("CameraInfoPublish.inputs:type", "camera_info"), ("CameraInfoPublish.inputs:resetSimulationTimeOnStop", True), ("CameraInfoPublish.inputs:frameId", "limo_rgbd_frame"), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "renderer.inputs:execIn"), ("context.outputs:context", "RGBPublish.inputs:context"), ("context.outputs:context", "DepthPublish.inputs:context"), ("context.outputs:context", "CameraInfoPublish.inputs:context"), ("renderer.outputs:execOut", "RGBPublish.inputs:execIn"), ("renderer.outputs:execOut", "DepthPublish.inputs:execIn"), ("renderer.outputs:execOut", "CameraInfoPublish.inputs:execIn"), ("renderer.outputs:renderProductPath", "RGBPublish.inputs:renderProductPath"), ("renderer.outputs:renderProductPath", "DepthPublish.inputs:renderProductPath"), ("renderer.outputs:renderProductPath", "CameraInfoPublish.inputs:renderProductPath"), ], }, ) except Exception as e: print(e) def add_background(self): add_reference_to_stage(usd_path=self.TRACK_PATH, prim_path="/World/LimoTrack") bg_mesh = UsdGeom.Mesh.Get(omni.usd.get_context().get_stage(), "/World/LimoTrack") physicsUtils.set_or_add_scale_op(bg_mesh, scale=Gf.Vec3f(0.01, 0.01, 0.01)) def add_robot(self): add_reference_to_stage(usd_path=self.ROBOT_PATH, prim_path="/World/Limo") limo_mesh = UsdGeom.Mesh.Get(omni.usd.get_context().get_stage(), 
"/World/Limo") physicsUtils.set_or_add_translate_op(limo_mesh, translate=Gf.Vec3f(0.0, -0.18, 0.0)) def add_light(self): distantLight1 = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/distantLight1")) distantLight1.CreateIntensityAttr(3000) distantLight1.AddTranslateOp().Set(Gf.Vec3f(0.0, 0.0, 0.0)) def setup_scene(self): self._world = self.get_world() self._stage = omni.usd.get_context().get_stage() self.add_background() self.add_light() self.add_robot() self.og_setup() self._save_count = 0 return async def setup_post_load(self): self._world = self.get_world() # self._world.add_physics_callback("sending_actions", callback_fn=self.send_robot_actions) return async def setup_pre_reset(self): if self._world.physics_callback_exists("sim_step"): self._world.remove_physics_callback("sim_step") self._world.pause() return async def setup_post_reset(self): await self._world.play_async() self._world.pause() return def world_cleanup(self): self._world.pause() return
15,868
Python
57.992565
109
0.560814
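The /ROS2Ackermann graph above turns a Twist message into a steering angle by multiplying the angular z rate by the wheelbase and feeding the product into an ATan2 node against the forward speed (the downstream ToRad node suggests that node reports degrees). The plain-Python sketch below mirrors that relation with the Limo parameters from the file; it illustrates the graph wiring and is not a replacement for the OmniGraph nodes.

import math

WHEEL_BASE = 0.2  # [m], matches self._wheelBase in the file above

def steering_angle_from_twist(linear_x: float, angular_z: float) -> float:
    """Bicycle-model steering angle (radians) mirroring the multiply -> atan2 wiring."""
    return math.atan2(angular_z * WHEEL_BASE, linear_x)

# Example: 0.5 m/s forward with a 1.0 rad/s yaw command -> ~0.38 rad steering angle
print(steering_angle_from_twist(0.5, 1.0))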
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/HelloDeformable/hello_deformable.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import numpy as np from omni.isaac.core.materials.deformable_material import DeformableMaterial from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.physx.scripts import deformableUtils, physicsUtils from omni.isaac.examples.base_sample import BaseSample from pxr import UsdGeom, Gf, UsdPhysics import omni.physx import omni.usd import omni class HelloDeformable(BaseSample): def __init__(self) -> None: super().__init__() return def _setup_simulation(self): self._scene = PhysicsContext() self._scene.set_solver_type("TGS") self._scene.set_broadphase_type("GPU") self._scene.enable_gpu_dynamics(flag=True) def setup_scene(self): world = self.get_world() self._setup_simulation() stage = omni.usd.get_context().get_stage() world.scene.add_default_ground_plane() # Create cube result, path = omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Cube") omni.kit.commands.execute("MovePrim", path_from=path, path_to="/World/cube") omni.usd.get_context().get_selection().set_selected_prim_paths([], False) cube_mesh = UsdGeom.Mesh.Get(stage, "/World/cube") physicsUtils.set_or_add_translate_op(cube_mesh, translate=Gf.Vec3f(0.0, 0.0, 0.5)) # physicsUtils.set_or_add_orient_op(cube_mesh, orient=Gf.Quatf(0.707, 0.707, 0, 0)) physicsUtils.set_or_add_scale_op(cube_mesh, scale=Gf.Vec3f(0.1, 0.1, 0.1)) cube_mesh.CreateDisplayColorAttr([(1.0, 0.0, 0.0)]) # Apply PhysxDeformableBodyAPI and PhysxCollisionAPI to skin mesh and set parameter to default values deformableUtils.add_physx_deformable_body( stage, "/World/cube", collision_simplification=True, simulation_hexahedral_resolution=10, self_collision=False, ) # Create a deformable body material and set it on the deformable body deformable_material_path = omni.usd.get_stage_next_free_path(stage, "/World/deformableBodyMaterial", True) deformableUtils.add_deformable_body_material( stage, deformable_material_path, youngs_modulus=10000.0, poissons_ratio=0.49, damping_scale=0.0, dynamic_friction=0.5, ) physicsUtils.add_physics_material_to_prim(stage, stage.GetPrimAtPath("/World/cube"), "/World/cube") async def setup_post_load(self): self._world = self.get_world() return
3,016
Python
37.679487
114
0.671088
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/FrankaFactory/franka_factory.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.franka.controllers import PickPlaceController from omni.isaac.examples.base_sample import BaseSample import numpy as np from omni.isaac.core.prims.geometry_prim import GeometryPrim # Note: checkout the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html from pxr import Sdf, UsdLux, Gf from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.rotations import euler_angles_to_quat from omni.isaac.core import SimulationContext import carb import omni from .franka_playing import FrankaPlaying class FrankaGarage(BaseSample): def __init__(self) -> None: super().__init__() carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._franka_position = np.array([-0.8064, 1.3602, 0.0]) # (w, x, y, z) self._franka_rotation = np.array([0.0, 0.0, 0.0, 1.0]) self._table_scale = 0.01 self._table_height = 0.0 self._table_position = np.array([-0.7, 1.8, 0.007]) # Gf.Vec3f(0.5, 0.0, 0.0) self._bin_scale = np.array([1.5, 1.5, 0.5]) self._bin1_position = np.array([-0.5, 2.1, 0.90797]) self._bin2_position = np.array([-0.5, 1.6, 0.90797]) return def add_background(self): self._world = self.get_world() self._nucleus_server = get_assets_root_path() bg_path = self._server_root + "/Projects/RBROS2/ConveyorGarage/Franka_Garage_Empty.usd" add_reference_to_stage(usd_path=bg_path, prim_path=f"/World/Franka_Garage") def add_bin(self): bin_path = self._nucleus_server + "/Isaac/Props/KLT_Bin/small_KLT_visual.usd" # bin 1 add_reference_to_stage(usd_path=bin_path, prim_path="/World/bin1") self._world.scene.add(GeometryPrim(prim_path="/World/bin1", name=f"bin1_ref_geom", collision=True)) self._bin1_ref_geom = self._world.scene.get_object(f"bin1_ref_geom") self._bin1_ref_geom.set_local_scale(np.array([self._bin_scale])) self._bin1_ref_geom.set_world_pose(position=self._bin1_position) self._bin1_ref_geom.set_default_state(position=self._bin1_position) # bin 2 add_reference_to_stage(usd_path=bin_path, prim_path="/World/bin2") self._world.scene.add(GeometryPrim(prim_path="/World/bin2", name=f"bin2_ref_geom", collision=True)) self._bin2_ref_geom = self._world.scene.get_object(f"bin2_ref_geom") self._bin2_ref_geom.set_local_scale(np.array([self._bin_scale])) self._bin2_ref_geom.set_world_pose(position=self._bin2_position) self._bin2_ref_geom.set_default_state(position=self._bin2_position) def add_light(self): stage = omni.usd.get_context().get_stage() distantLight = UsdLux.CylinderLight.Define(stage, Sdf.Path("/World/cylinderLight")) distantLight.CreateIntensityAttr(60000) distantLight.AddTranslateOp().Set(Gf.Vec3f(-1.2, 0.9, 3.0)) distantLight.AddScaleOp().Set((0.1, 4.0, 0.1)) distantLight.AddRotateXYZOp().Set((0, 0, 90)) async def add_table(self): table_path = self._nucleus_server + "/Isaac/Samples/Examples/FrankaNutBolt/SubUSDs/Shop_Table/Shop_Table.usd" 
add_reference_to_stage(usd_path=table_path, prim_path="/World/table") self._world.scene.add(GeometryPrim(prim_path="/World/table", name=f"table_ref_geom", collision=True)) self._table_ref_geom = self._world.scene.get_object(f"table_ref_geom") self._table_ref_geom.set_local_scale(np.array([self._table_scale])) self._table_ref_geom.set_world_pose(position=self._table_position) self._table_ref_geom.set_default_state(position=self._table_position) lb = self._world.scene.compute_object_AABB(name=f"table_ref_geom") zmin = lb[0][2] zmax = lb[1][2] self._table_height = zmax async def add_controller(self): self._franka = self._world.scene.get_object("franka") self._controller = PickPlaceController( name="pick_place_controller", gripper=self._franka.gripper, robot_articulation=self._franka, end_effector_initial_height=1.1, ) def setup_scene(self): self._world = self.get_world() self._stage = omni.usd.get_context().get_stage() self.simulation_context = SimulationContext() self.add_background() self.add_light() self.add_bin() self._world.add_task( FrankaPlaying( name="franka_task", object="tuna_fish_can" )) return async def setup_post_load(self): self._world = self.get_world() self._world.scene.enable_bounding_boxes_computations() # for compute_object_AABB from add_table, # bounding box computations should be enabled before, # which means we need to wait for the scene to be loaded # That's for franka too. await self.add_table() await self.add_controller() self._world.add_physics_callback("sim_step", callback_fn=self.physics_callback) #callback names have to be unique return def physics_callback(self, step_size): current_observations = self._world.get_observations() object_position = current_observations["object"]["position"] # print(object_position) if object_position[1] > 1.25: print("picking and placing") actions = self._controller.forward( picking_position=object_position, placing_position=current_observations["object"]["goal_position"], current_joint_positions=current_observations["franka"]["joint_positions"], end_effector_orientation=euler_angles_to_quat( # np.array([0, np.pi, -np.pi/2]) np.array([0, np.pi, 0]) ), ) if self._controller.is_done(): print("done picking and placing") self._franka.apply_action(actions) return # async def setup_pre_reset(self): # return async def setup_post_reset(self): self._controller.reset() await self._world.play_async() return def world_cleanup(self): self._world.pause() return
7,073
Python
37.032258
121
0.639333
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/FrankaFactory/franka_playing.py
from omni.isaac.core.tasks import BaseTask from omni.isaac.franka.franka import Franka from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.prims.geometry_prim import GeometryPrim from pxr import Gf, PhysxSchema, Usd, UsdPhysics, UsdShade, UsdGeom, Sdf, Tf, UsdLux from omni.physx.scripts import utils import numpy as np import omni def createRigidBody(stage, primPath): bodyPrim = stage.GetPrimAtPath(primPath) # UsdPhysics.RigidBodyAPI.Apply(bodyPrim) utils.setRigidBody(bodyPrim, "convexDecomposition", False) def addObjectsGeom(scene, name, scale, ini_pos, mass, orientation=None): scene.add(GeometryPrim(prim_path=f"/World/{name}", name=f"{name}_ref_geom", collision=True)) geom = scene.get_object(f"{name}_ref_geom") if orientation is None: # Usually - (x, y, z, w) # But in Isaac Sim - (w, x, y, z) orientation = np.array([1.0, 0.0, 0.0, 0.0]) object_scale = np.array([1.0, 1.0, 1.0]) if isinstance(scale, float): object_scale = np.array([scale, scale, scale]) elif isinstance(scale, list): object_scale = np.array(scale) geom.set_local_scale(object_scale) geom.set_world_pose(position=ini_pos) geom.set_default_state(position=ini_pos, orientation=orientation) massAPI = UsdPhysics.MassAPI.Apply(geom.prim.GetPrim()) massAPI.CreateMassAttr().Set(mass) return geom class FrankaPlaying(BaseTask): def __init__(self, name, object="sugar_box"): super().__init__(name=name, offset=None) self._object = object self._franka_position = np.array([-1.0, 1.8602, 0.8976]) self._franka_rotation = np.array([0.0, 0.0, 0.0, 1.0]) # (w, x, y, z) self._sugar_box_scale = 0.7 self._tomato_soup_scale = 1.0 self._tuna_fish_can_scale = 0.9 # self._obj_ini_pos = np.array([-1.0, 1.4, 1.0]) # self._obj_ini_pos = np.array([-1.5, 1.29418, 0.79]) self._obj_ini_pos = np.array([8.0, -0.05, 0.8]) self._obj_target = np.array([-0.5, 2.17, 1.1]) return def add_franka(self, scene): self._franka = scene.add( Franka( prim_path="/World/franka", name="franka", gripper_open_position=np.array([0.2, 0.2]) / get_stage_units() ) ) # adjust franka position self._franka = scene.get_object(f"franka") franka_pos = np.array(self._franka_position) self._franka.set_world_pose( position=franka_pos, orientation=self._franka_rotation ) def add_sugar_box(self, scene): sugar_box_path = get_assets_root_path() + "/Isaac/Props/YCB/Axis_Aligned_Physics/004_sugar_box.usd" add_reference_to_stage(usd_path=sugar_box_path, prim_path="/World/sugar_box") self._sugar_box_ref_geom = addObjectsGeom(scene, "sugar_box", self._sugar_box_scale, self._obj_ini_pos, 0.02) def add_tomato_soup_can(self, scene): tomato_soup_can_path = get_assets_root_path() + "/Isaac/Props/YCB/Axis_Aligned_Physics/005_tomato_soup_can.usd" add_reference_to_stage(usd_path=tomato_soup_can_path, prim_path="/World/tomato_soup_can") self._tomato_soup_can_ref_geom = addObjectsGeom(scene, "tomato_soup_can", self._tomato_soup_scale, self._obj_ini_pos, 0.02) def add_tuna_fish_can(self, scene): tuna_fish_can_path = get_assets_root_path() + "/Isaac/Props/YCB/Axis_Aligned/007_tuna_fish_can.usd" add_reference_to_stage(usd_path=tuna_fish_can_path, prim_path="/World/tuna_fish_can") # give rigid body property for visual only objects stage = omni.usd.get_context().get_stage() createRigidBody(stage, "/World/tuna_fish_can") orientation = np.array([ 0.7071068, 0.7071068, 0.0, 0.0 ]) self._tuna_fish_can_ref_geom = addObjectsGeom(scene, "tuna_fish_can", self._tuna_fish_can_scale, self._obj_ini_pos, 
0.02, orientation) def set_up_scene(self, scene): super().set_up_scene(scene) self.add_franka(scene) if self._object == "sugar_box": self.add_sugar_box(scene) elif self._object == "tomato_soup_can": self.add_tomato_soup_can(scene) elif self._object == "tuna_fish_can": self.add_tuna_fish_can(scene) return def get_observations(self): current_joint_positions = self._franka.get_joint_positions() currnet_joint_velocities = self._franka.get_joint_velocities() observations = { self._franka.name: { "joint_positions": current_joint_positions, "joint_velocities": currnet_joint_velocities, }, } if self._object == "sugar_box": sugar_box_position, _ = self._sugar_box_ref_geom.get_world_pose() observations["object"] = { "position": sugar_box_position, "goal_position": self._obj_target } elif self._object == "tomato_soup_can": tomato_soup_can_position, _ = self._tomato_soup_can_ref_geom.get_world_pose() observations["object"] = { "position": tomato_soup_can_position, "goal_position": self._obj_target } elif self._object == "tuna_fish_can": tuna_fish_can_position, _ = self._tuna_fish_can_ref_geom.get_world_pose() observations["object"] = { "position": tuna_fish_can_position, "goal_position": self._obj_target } return observations def post_reset(self): self._franka.gripper.set_joint_positions(self._franka.gripper.joint_opened_positions) self._task_achieved = False return
5,904
Python
36.373417
142
0.609417
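A hypothetical sketch reusing the createRigidBody and addObjectsGeom helpers above to register one more YCB prop; the asset name, prim path, pose, and mass are placeholder assumptions following the same pattern as add_tuna_fish_can.

# Hypothetical reuse of the helpers defined above (asset path and pose are placeholders).
import numpy as np
import omni
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage

def add_cracker_box(scene, ini_pos=np.array([8.0, -0.05, 0.8])):
    usd_path = get_assets_root_path() + "/Isaac/Props/YCB/Axis_Aligned/003_cracker_box.usd"
    add_reference_to_stage(usd_path=usd_path, prim_path="/World/cracker_box")
    # The Axis_Aligned assets are visual-only, so attach a convex-decomposition rigid body first.
    stage = omni.usd.get_context().get_stage()
    createRigidBody(stage, "/World/cracker_box")
    return addObjectsGeom(scene, "cracker_box", 1.0, ini_pos, 0.02)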
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/FrankaNuts/franka_nuts_pick_and_place.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.franka.controllers import PickPlaceController from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.tasks import BaseTask from omni.isaac.franka import Franka from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.isaac.core.utils.rotations import euler_angles_to_quat from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.prims.geometry_prim import GeometryPrim from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.prims.rigid_prim import RigidPrim from omni.isaac.sensor import Camera import omni.isaac.core.utils.numpy.rotations as rot_utils from omni.isaac.core import SimulationContext from pxr import PhysxSchema from datetime import datetime import numpy as np import h5py class FrankaPlaying(BaseTask): #NOTE: we only cover here a subset of the task functions that are available, # checkout the base class for all the available functions to override. # ex: calculate_metrics, is_done..etc. def __init__(self, name): super().__init__(name=name, offset=None) self._num_nuts = 2 self._num_bins = 2 # Asset Path from Nucleus self._bin_asset_path = get_assets_root_path() + "/Isaac/Props/KLT_Bin/small_KLT.usd" self._nut_asset_path = get_assets_root_path() + "/Isaac/Samples/Examples/FrankaNutBolt/SubUSDs/Nut/M20_Nut_Tight_R256_Franka_SI.usd" self._bin_position = np.array([ [ 0.35, -0.25, 0.1], [ 0.35, 0.25, 0.1], ]) self._bins = [] self._bins_offset = 0.1 self._nuts_position = np.array([ [0.35, -0.22, 0.2], [0.30, -0.28, 0.2], ]) self._nuts = [] self._nuts_offset = 0.005 self._goal_position = np.array([ [0.35, 0.18, 0.2], [0.30, 0.25, 0.2], ]) self._pick_position = np.array([0, 0, 0]) self._task_achieved = False self._task_event = 0 return def setup_cameras(self): # Exception: You can not define translation and position at the same time self._camera1 = Camera( prim_path="/World/Fancy_Franka/panda_hand/hand_camera", # position=np.array([0.088, 0.0, 0.926]), translation=np.array([0.1, 0.0, -0.1]), frequency=30, resolution=(640, 480), orientation=rot_utils.euler_angles_to_quats( np.array([ 180, -90 - 25, 0 ]), degrees=True), ) self._camera1.set_clipping_range(0.1, 1000000.0) self._camera1.initialize() self._camera1.add_motion_vectors_to_frame() self._camera1.set_visibility(False) self._camera2 = Camera( prim_path="/World/top_camera", position=np.array([0.0, 0.0, 5.0]), frequency=30, resolution=(640, 480), orientation=rot_utils.euler_angles_to_quats( np.array([ 0, 90, 0 ]), degrees=True), ) self._camera2.initialize() self._camera2.set_visibility(False) self._camera3 = Camera( prim_path="/World/front_camera", position=np.array([1.0, 0.0, 0.3]), frequency=30, resolution=(640, 480), orientation=rot_utils.euler_angles_to_quats( np.array([ 0, 0, 180 ]), degrees=True), ) self._camera3.set_clipping_range(0.1, 1000000.0) self._camera3.set_focal_length(1.0) self._camera3.initialize() self._camera3.set_visibility(False) return def setup_bins(self, scene): for bins in range(self._num_bins): 
add_reference_to_stage( usd_path=self._bin_asset_path, prim_path=f"/World/bin{bins}", ) _bin = scene.add( RigidPrim( prim_path=f"/World/bin{bins}", name=f"bin{bins}", position=self._bin_position[bins] / get_stage_units(), orientation=euler_angles_to_quat(np.array([np.pi, 0., 0.])), mass=0.1, # kg ) ) self._bins.append(_bin) return def setup_nuts(self, scene): for nut in range(self._num_nuts): add_reference_to_stage( usd_path=self._nut_asset_path, prim_path=f"/World/nut{nut}", ) nut = scene.add( GeometryPrim( prim_path=f"/World/nut{nut}", name=f"nut{nut}_geom", position=self._nuts_position[nut] / get_stage_units(), collision=True, # mass=0.1, # kg ) ) self._nuts.append(nut) return # Here we setup all the assets that we care about in this task. def set_up_scene(self, scene): super().set_up_scene(scene) scene.add_default_ground_plane() self._franka = scene.add( Franka( prim_path="/World/Fancy_Franka", name="fancy_franka" ) ) self.setup_cameras() self.setup_bins(scene) self.setup_nuts(scene) return # Information exposed to solve the task is returned from the task through get_observations def get_observations(self): current_joint_positions = self._franka.get_joint_positions() currnet_joint_velocities = self._franka.get_joint_velocities() self._pick_position1, _ = self._nuts[0].get_world_pose() self._pick_position1[2] += self._nuts_offset self._pick_position2, _ = self._nuts[1].get_world_pose() self._pick_position2[2] += self._nuts_offset observations = { self._franka.name: { "joint_positions": current_joint_positions, "joint_velocities": currnet_joint_velocities, }, "nut0_geom": { "position": self._pick_position1, "goal_position": self._goal_position[0], }, "nut1_geom": { "position": self._pick_position2, "goal_position": self._goal_position[1], }, } return observations # Called before each physics step, # for instance we can check here if the task was accomplished by # changing the color of the cube once its accomplished def pre_step(self, control_index, simulation_time): return # Called after each reset, # for instance we can always set the gripper to be opened at the beginning after each reset # also we can set the cube's color to be blue def post_reset(self): self._franka.gripper.set_joint_positions(self._franka.gripper.joint_opened_positions) # self._nuts[0].get_applied_visual_material().set_color(color=np.array([0, 0, 1.0])) self._task_achieved = False return @property def camera1(self): return self._camera1 @property def camera2(self): return self._camera2 @property def camera3(self): return self._camera3 class FrankaNutsBasic(BaseSample): def __init__(self) -> None: super().__init__() # some global sim options: self._time_steps_per_second = 240 # 4.167ms aprx self._fsm_update_rate = 60 self._solverPositionIterations = 4 self._solverVelocityIterations = 1 self._solver_type = "TGS" self._ik_damping = 0.1 self._event = 0 self._step_size = 0.01 return def _setup_simulation(self): self._scene = PhysicsContext() self._scene.set_solver_type(self._solver_type) self._scene.set_broadphase_type("GPU") self._scene.enable_gpu_dynamics(flag=True) self._scene.set_friction_offset_threshold(0.01) self._scene.set_friction_correlation_distance(0.0005) self._scene.set_gpu_total_aggregate_pairs_capacity(10 * 1024) self._scene.set_gpu_found_lost_pairs_capacity(10 * 1024) self._scene.set_gpu_heap_capacity(64 * 1024 * 1024) self._scene.set_gpu_found_lost_aggregate_pairs_capacity(10 * 1024) # added because of new errors regarding collisionstacksize physxSceneAPI = 
PhysxSchema.PhysxSceneAPI.Apply(get_prim_at_path("/physicsScene")) physxSceneAPI.CreateGpuCollisionStackSizeAttr().Set(76000000) # or whatever min is needed def setup_dataset(self): self._f = None self._sim_time_list = [] self._joint_positions = [] self._joint_velocities = [] self._camera1_img = [] self._camera2_img = [] self._camera3_img = [] now = datetime.now() # current date and time date_time_str = now.strftime("%m_%d_%Y_%H_%M_%S") file_name = f'franka_nuts_basis_{date_time_str}.hdf5' print(file_name) self._f = h5py.File(file_name,'w') self._group_f = self._f.create_group("isaac_dataset") self._save_count = 0 self._img_f = self._group_f.create_group("camera_images") return def setup_scene(self): print("setup_scene") world = self.get_world() self.simulation_context = SimulationContext() self._setup_simulation() self.setup_dataset() # We add the task to the world here self._franka_playing = FrankaPlaying(name="my_first_task") world.add_task(self._franka_playing) return async def setup_post_load(self): print("setup_post_load") self._world = self.get_world() # The world already called the setup_scene from the task (with first reset of the world) # so we can retrieve the task objects self._franka = self._world.scene.get_object("fancy_franka") self._controller = PickPlaceController( name="pick_place_controller", gripper=self._franka.gripper, robot_articulation=self._franka, ) self._camera1 = self._franka_playing.camera1 self._camera2 = self._franka_playing.camera2 self._camera3 = self._franka_playing.camera3 self._world.add_physics_callback("sim_step", callback_fn=self.physics_step) await self._world.play_async() return async def setup_pre_reset(self): if self._f is not None: self._f.close() self._f = None elif self._f is None: print("Create new file for new data collection...") self.setup_dataset() self._save_count = 0 self._event = 0 return async def setup_post_reset(self): self._controller.reset() await self._world.play_async() return def physics_step(self, step_size): # Gets all the tasks observations self._camera1.get_current_frame() self._camera2.get_current_frame() self._camera3.get_current_frame() current_observations = self._world.get_observations() current_time = self.simulation_context.current_time current_joint_pos = current_observations["fancy_franka"]["joint_positions"] current_joint_vel = current_observations["fancy_franka"]["joint_velocities"] # print(step_size) if self._save_count % 100 == 0: if current_joint_pos is not None and current_joint_vel is not None: self._sim_time_list.append(current_time) self._joint_positions.append(current_joint_pos) self._joint_velocities.append(current_joint_vel) self._camera1_img.append(self._camera1.get_rgba()[:, :, :3]) self._camera2_img.append(self._camera2.get_rgba()[:, :, :3]) self._camera3_img.append(self._camera3.get_rgba()[:, :, :3]) print("Collecting data...") if self._event == 0: actions = self._controller.forward( picking_position=current_observations["nut0_geom"]["position"], placing_position=current_observations["nut0_geom"]["goal_position"], current_joint_positions=current_joint_pos, ) self._franka.apply_action(actions) elif self._event == 1: actions = self._controller.forward( picking_position=current_observations["nut1_geom"]["position"], placing_position=current_observations["nut1_geom"]["goal_position"], current_joint_positions=current_joint_pos, ) self._franka.apply_action(actions) self._save_count += 1 if self._controller.is_done(): self._controller.reset() self._event += 1 if self._event == 2: self.world_cleanup() return 
def world_cleanup(self): try: if self._f is not None: self._group_f.create_dataset(f"sim_time", data=self._sim_time_list, compression='gzip', compression_opts=9) self._group_f.create_dataset(f"joint_positions", data=self._joint_positions, compression='gzip', compression_opts=9) self._group_f.create_dataset(f"joint_velocities", data=self._joint_velocities, compression='gzip', compression_opts=9) self._img_f.create_dataset(f"hand_camera", data=self._camera1_img, compression='gzip', compression_opts=9) self._img_f.create_dataset(f"top_camera", data=self._camera2_img, compression='gzip', compression_opts=9) self._img_f.create_dataset(f"front_camera", data=self._camera3_img, compression='gzip', compression_opts=9) self._f.close() print("Data saved") elif self._f is None: print("Invalid Operation Data not saved") except Exception as e: print(e) finally: self._f = None self._save_count = 0 self._world.pause() return
14,771
Python
33.921986
140
0.580259
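world_cleanup above stores the collected trajectories under an isaac_dataset group with a nested camera_images group. A small, hypothetical read-back sketch for that layout follows; the file name is a placeholder, since setup_dataset timestamps the real file name.

import h5py

# Placeholder name; real files look like franka_nuts_basis_<MM_DD_YYYY_HH_MM_SS>.hdf5
with h5py.File("franka_nuts_basis_01_01_2024_00_00_00.hdf5", "r") as f:
    data = f["isaac_dataset"]
    sim_time = data["sim_time"][:]                       # (N,) simulation time per sample
    joint_positions = data["joint_positions"][:]         # (N, num_joints) Franka arm + gripper
    joint_velocities = data["joint_velocities"][:]
    hand_rgb = data["camera_images"]["hand_camera"][:]   # (N, 480, 640, 3) RGB frames
    print(sim_time.shape, joint_positions.shape, hand_rgb.shape)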
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/URPalletizing/ur10_palletizing.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import random import numpy as np import omni import h5py import omni.isaac.cortex.math_util as math_util import omni.isaac.core.utils.numpy.rotations as rot_utils from omni.isaac.core import SimulationContext from omni.isaac.core.objects.capsule import VisualCapsule from omni.isaac.core.objects.sphere import VisualSphere from omni.isaac.core.prims.xform_prim import XFormPrim from omni.isaac.core.tasks.base_task import BaseTask from omni.isaac.core.utils.rotations import euler_angles_to_quat from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.cortex.cortex_rigid_prim import CortexRigidPrim from omni.isaac.cortex.cortex_utils import get_assets_root_path from omni.isaac.cortex.robot import CortexUr10 from omni.isaac.cortex.sample_behaviors.ur10 import bin_stacking_behavior as behavior from omni.isaac.examples.cortex.cortex_base import CortexBase from omni.isaac.sensor import Camera class Ur10Assets: def __init__(self): self.assets_root_path = get_assets_root_path() self.ur10_table_usd = ( self.assets_root_path + "/Isaac/Samples/Leonardo/Stage/ur10_bin_stacking_short_suction.usd" ) self.small_klt_usd = self.assets_root_path + "/Isaac/Props/KLT_Bin/small_KLT.usd" self.background_usd = self.assets_root_path + "/Isaac/Environments/Simple_Warehouse/warehouse.usd" self.rubiks_cube_usd = self.assets_root_path + "/Isaac/Props/Rubiks_Cube/rubiks_cube.usd" def random_bin_spawn_transform(): x = random.uniform(-0.15, 0.15) y = 1.5 z = -0.15 position = np.array([x, y, z]) z = random.random() * 0.02 - 0.01 w = random.random() * 0.02 - 0.01 norm = np.sqrt(z**2 + w**2) quat = math_util.Quaternion([w / norm, 0, 0, z / norm]) if random.random() > 0.5: print("<flip>") # flip the bin so it's upside down quat = quat * math_util.Quaternion([0, 0, 1, 0]) else: print("<no flip>") return position, quat.vals class BinStackingTask(BaseTask): def __init__(self, env_path, assets) -> None: super().__init__("bin_stacking") self.assets = assets self.env_path = env_path self.bins = [] self.stashed_bins = [] self.on_conveyor = None def _spawn_bin(self, rigid_bin): x, q = random_bin_spawn_transform() rigid_bin.set_world_pose(position=x, orientation=q) rigid_bin.set_linear_velocity(np.array([0, -0.30, 0])) rigid_bin.set_visibility(True) def post_reset(self) -> None: if len(self.bins) > 0: for rigid_bin in self.bins: self.scene.remove_object(rigid_bin.name) self.bins.clear() self.on_conveyor = None def pre_step(self, time_step_index, simulation_time) -> None: """Spawn a new randomly oriented bin if the previous bin has been placed.""" spawn_new = False if self.on_conveyor is None: spawn_new = True else: (x, y, z), _ = self.on_conveyor.get_world_pose() is_on_conveyor = y > 0.0 and -0.4 < x and x < 0.4 if not is_on_conveyor: spawn_new = True if spawn_new: name = "bin_{}".format(len(self.bins)) prim_path = self.env_path + "/bins/{}".format(name) # "/Isaac/Props/KLT_Bin/small_KLT.usd" # prim_path add_reference_to_stage(usd_path=self.assets.small_klt_usd, prim_path=prim_path) self.on_conveyor = self.scene.add(CortexRigidPrim(name=name, prim_path=prim_path)) 
self._spawn_bin(self.on_conveyor) self.bins.append(self.on_conveyor) def world_cleanup(self): self.bins = [] self.stashed_bins = [] self.on_conveyor = None return class BinStacking(CortexBase): def __init__(self, monitor_fn=None): super().__init__() self._monitor_fn = monitor_fn self.robot = None self._sim_time_list = [] self._joint_positions = [] self._joint_velocities = [] self._camera1_img = [] self._camera2_img = [] self._camera3_img = [] self._camera4_img = [] self._camera5_img = [] self._save_count = 0 def _setup_camera(self): self._camera1 = Camera( prim_path="/World/Ur10Table/ur10/ee_link/ee_camera", # position=np.array([0.088, 0.0, 0.926]), translation=np.array([-0.15, 0.0, -0.1]), frequency=30, resolution=(640, 480), orientation=rot_utils.euler_angles_to_quats( np.array([ 180.0, -15.0, 0.0 ]), degrees=True), ) self._camera1.set_clipping_range(0.1, 1000000.0) self._camera1.set_focal_length(1.5) self._camera1.initialize() self._camera1.add_motion_vectors_to_frame() self._camera1.set_visibility(False) self._camera2 = Camera( prim_path="/World/left_camera", position=np.array([2.5, 0.0, 0.0]), # translation=np.array([0.0, 0.0, -0.1]), frequency=30, resolution=(640, 480), orientation=rot_utils.euler_angles_to_quats( np.array([ 0.0, 0.0, 180.0 ]), degrees=True), ) self._camera2.set_focal_length(1.5) self._camera2.set_visibility(False) self._camera2.initialize() self._camera3 = Camera( prim_path="/World/right_camera", position=np.array([-2.5, 0.0, 0.0]), # translation=np.array([0.0, 0.0, -0.1]), frequency=30, resolution=(640, 480), orientation=rot_utils.euler_angles_to_quats( np.array([ 0.0, 0.0, 0.0 ]), degrees=True), ) self._camera3.set_focal_length(1.5) self._camera3.set_visibility(False) self._camera3.initialize() self._camera4 = Camera( prim_path="/World/front_camera", position=np.array([0.0, 2.0, 0.0]), # translation=np.array([0.0, 0.0, -0.1]), frequency=30, resolution=(640, 480), orientation=rot_utils.euler_angles_to_quats( np.array([ 0.0, 0.0, -90.0 ]), degrees=True), ) self._camera4.set_focal_length(1.5) self._camera4.set_visibility(False) self._camera4.initialize() self._camera5 = Camera( prim_path="/World/back_camera", position=np.array([0.5, -2.0, -0.2]), # translation=np.array([0.0, 0.0, -0.1]), frequency=30, resolution=(640, 480), orientation=rot_utils.euler_angles_to_quats( np.array([ 0.0, 0.0, 90.0 ]), degrees=True), ) self._camera5.set_focal_length(1.5) self._camera5.set_visibility(False) self._camera5.initialize() def _setup_data_collection(self): self._f = h5py.File('ur_bin_palleting.hdf5','w') self._group_f = self._f.create_group("isaac_dataset") self._save_count = 0 self._img_f = self._group_f.create_group("camera_images") def setup_scene(self): world = self.get_world() self.simulation_context = SimulationContext() env_path = "/World/Ur10Table" ur10_assets = Ur10Assets() add_reference_to_stage(usd_path=ur10_assets.ur10_table_usd, prim_path=env_path) add_reference_to_stage(usd_path=ur10_assets.background_usd, prim_path="/World/Background") background_prim = XFormPrim( "/World/Background", position=[10.00, 2.00, -1.18180], orientation=[0.7071, 0, 0, 0.7071] ) self.robot = world.add_robot(CortexUr10(name="robot", prim_path="{}/ur10".format(env_path))) obs = world.scene.add( VisualSphere( "/World/Ur10Table/Obstacles/FlipStationSphere", name="flip_station_sphere", position=np.array([0.73, 0.76, -0.13]), radius=0.2, visible=False, ) ) self.robot.register_obstacle(obs) obs = world.scene.add( VisualSphere( "/World/Ur10Table/Obstacles/NavigationDome", 
name="navigation_dome_obs", position=[-0.031, -0.018, -1.086], radius=1.1, visible=False, ) ) self.robot.register_obstacle(obs) az = np.array([1.0, 0.0, -0.3]) ax = np.array([0.0, 1.0, 0.0]) ay = np.cross(az, ax) R = math_util.pack_R(ax, ay, az) quat = math_util.matrix_to_quat(R) obs = world.scene.add( VisualCapsule( "/World/Ur10Table/Obstacles/NavigationBarrier", name="navigation_barrier_obs", position=[0.471, 0.276, -0.463 - 0.1], orientation=quat, radius=0.5, height=0.9, visible=False, ) ) self.robot.register_obstacle(obs) obs = world.scene.add( VisualCapsule( "/World/Ur10Table/Obstacles/NavigationFlipStation", name="navigation_flip_station_obs", position=np.array([0.766, 0.755, -0.5]), radius=0.5, height=0.5, visible=False, ) ) self.robot.register_obstacle(obs) self._setup_camera() self._setup_data_collection() async def setup_post_load(self): world = self.get_world() env_path = "/World/Ur10Table" ur10_assets = Ur10Assets() if not self.robot: self.robot = world._robots["robot"] world._current_tasks.clear() world._behaviors.clear() world._logical_state_monitors.clear() self.task = BinStackingTask(env_path, ur10_assets) print(world.scene) self.task.set_up_scene(world.scene) world.add_task(self.task) self.decider_network = behavior.make_decider_network(self.robot, self._on_monitor_update) world.add_decider_network(self.decider_network) return def _on_monitor_update(self, diagnostics): decision_stack = "" if self.decider_network._decider_state.stack: decision_stack = "\n".join( [ "{0}{1}".format(" " * i, element) for i, element in enumerate(str(i) for i in self.decider_network._decider_state.stack) ] ) if self._monitor_fn: self._monitor_fn(diagnostics, decision_stack) def _on_physics_step(self, step_size): world = self.get_world() self._camera1.get_current_frame() self._camera2.get_current_frame() self._camera3.get_current_frame() self._camera4.get_current_frame() self._camera5.get_current_frame() current_time = self.simulation_context.current_time current_joint_state = self.robot.get_joints_state() current_joint_positions = current_joint_state.positions current_joint_velocities = current_joint_state.velocities print(self._save_count) if self._save_count % 50 == 0: self._sim_time_list.append(current_time) self._joint_positions.append(current_joint_positions) self._joint_velocities.append(current_joint_velocities) self._camera1_img.append(self._camera1.get_rgba()[:, :, :3]) self._camera2_img.append(self._camera2.get_rgba()[:, :, :3]) self._camera3_img.append(self._camera3.get_rgba()[:, :, :3]) self._camera4_img.append(self._camera4.get_rgba()[:, :, :3]) self._camera5_img.append(self._camera5.get_rgba()[:, :, :3]) print("Collecting data...") if self._save_count > 3000: self.save_data() self._save_count += 1 world.step(False, False) return async def on_event_async(self): world = self.get_world() await omni.kit.app.get_app().next_update_async() world.reset_cortex() world.add_physics_callback("sim_step", self._on_physics_step) await world.play_async() return async def setup_pre_reset(self): world = self.get_world() if world.physics_callback_exists("sim_step"): world.remove_physics_callback("sim_step") return def world_cleanup(self): return def save_data(self): self._group_f.create_dataset(f"sim_time", data=self._sim_time_list, compression='gzip', compression_opts=9) self._group_f.create_dataset(f"joint_positions", data=self._joint_positions, compression='gzip', compression_opts=9) self._group_f.create_dataset(f"joint_velocities", data=self._joint_velocities, compression='gzip', 
                                     compression_opts=9)
        self._img_f.create_dataset(f"ee_camera", data=self._camera1_img, compression='gzip', compression_opts=9)
        self._img_f.create_dataset(f"left_camera", data=self._camera2_img, compression='gzip', compression_opts=9)
        self._img_f.create_dataset(f"right_camera", data=self._camera3_img, compression='gzip', compression_opts=9)
        self._img_f.create_dataset(f"front_camera", data=self._camera4_img, compression='gzip', compression_opts=9)
        self._img_f.create_dataset(f"back_camera", data=self._camera5_img, compression='gzip', compression_opts=9)
        self._f.close()
        print("Data saved")

        self._save_count = 0
        self._world.pause()
        return
14,252
Python
35.359694
126
0.57648
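The save_data() routine above flushes every logged quantity into a single HDF5 file with gzip-compressed datasets. A minimal read-back sketch, assuming the file name and group layout used in _setup_data_collection() and save_data() (ur_bin_palleting.hdf5, group "isaac_dataset", image subgroup "camera_images"):

# Sketch: inspect the HDF5 log written by BinStacking.save_data().
import h5py
import numpy as np

with h5py.File("ur_bin_palleting.hdf5", "r") as f:
    grp = f["isaac_dataset"]
    sim_time = np.asarray(grp["sim_time"])                   # (N,) seconds
    joint_pos = np.asarray(grp["joint_positions"])           # (N, num_joints)
    ee_rgb = np.asarray(grp["camera_images"]["ee_camera"])   # roughly (N, 480, 640, 3)
    print(sim_time.shape, joint_pos.shape, ee_rgb.shape)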
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/URPalletizing/ur10_palletizing_extension.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import asyncio import os import omni.ui as ui from omni.isaac.cortex.cortex_world import CortexWorld from omni.isaac.examples.base_sample import BaseSampleExtension from omni.isaac.ui.ui_utils import btn_builder, cb_builder, get_style, str_builder from .ur10_palletizing import BinStacking class BinStackingExtension(BaseSampleExtension): def on_startup(self, ext_id: str): super().on_startup(ext_id) super().start_extension( menu_name="RoadBalanceEdu", submenu_name="ETRIDemo", name="UR10 Palletizing", title="UR10 Palletizing", doc_link="https://docs.omniverse.nvidia.com/isaacsim/latest/replicator_tutorials/tutorial_replicator_ur10_palletizing.html#isaac-sim-app-tutorial-replicator-ur10-palletizing", overview="This Example shows how to do Palletizing using UR10 robot and Cortex behaviors in Isaac Sim.\n\nPress the 'Open in IDE' button to view the source code.", sample=BinStacking(self.on_diagnostics), file_path=os.path.abspath(__file__), number_of_extra_frames=2, ) self.decision_stack = "" self.task_ui_elements = {} frame = self.get_frame(index=0) self.build_task_controls_ui(frame) return def on_diagnostics(self, diagnostic, decision_stack): if self.decision_stack != decision_stack: self.decision_stack = decision_stack if decision_stack: decision_stack = "\n".join( [ "{0}{1}".format(" " * (i + 1) if i > 0 else "", element) for i, element in enumerate(decision_stack.replace("]", "").split("[")) ] ) self.state_model.set_value(decision_stack) if diagnostic.bin_name: self.selected_bin.set_value(str(diagnostic.bin_name)) self.bin_base.set_value(str(diagnostic.bin_base.prim_path)) self.grasp_reached.set_value((diagnostic.grasp_reached)) self.is_attached.set_value((diagnostic.attached)) self.needs_flip.set_value((diagnostic.needs_flip)) else: self.selected_bin.set_value(str("No Bin Selected")) self.bin_base.set_value("") self.grasp_reached.set_value(False) self.is_attached.set_value(False) self.needs_flip.set_value(False) def get_world(self): return CortexWorld.instance() def _on_start_button_event(self): asyncio.ensure_future(self.sample.on_event_async()) self.task_ui_elements["Start Palletizing"].enabled = False return def post_reset_button_event(self): self.task_ui_elements["Start Palletizing"].enabled = True return def post_load_button_event(self): self.task_ui_elements["Start Palletizing"].enabled = True return def post_clear_button_event(self): self.task_ui_elements["Start Palletizing"].enabled = False return def build_task_controls_ui(self, frame): with frame: with ui.VStack(spacing=5): # Update the Frame Title frame.title = "Task Controls" frame.visible = True dict = { "label": "Start Palletizing", "type": "button", "text": "Start Palletizing", "tooltip": "Start Palletizing", "on_clicked_fn": self._on_start_button_event, } self.task_ui_elements["Start Palletizing"] = btn_builder(**dict) self.task_ui_elements["Start Palletizing"].enabled = False # with self._main_stack: with self.get_frame(index=1): self.get_frame(index=1).title = "Diagnostics" self.get_frame(index=1).visible = True self._diagnostics = ui.VStack(spacing=5) # self._diagnostics.enabled = False with 
self._diagnostics:
                ui.Label("Decision Stack", height=20)
                self.state_model = ui.SimpleStringModel()
                ui.StringField(self.state_model, multiline=True, height=120)

                self.selected_bin = str_builder("Selected Bin", "<No Bin Selected>", read_only=True)
                self.bin_base = str_builder("Bin Base", "", read_only=True)
                self.grasp_reached = cb_builder("Grasp Reached", False)
                self.is_attached = cb_builder("Is Attached", False)
                self.needs_flip = cb_builder("Needs Flip", False)
5,043
Python
42.860869
187
0.608368
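on_diagnostics() above re-indents the decider stack string before pushing it into the UI string model. A small pure-Python sketch of that formatting; the input string format ("Root[Child[Grandchild...") is an assumption for illustration only:

stack = "Root[PickBin[ApproachGrasp"
formatted = "\n".join(
    "{0}{1}".format(" " * (i + 1) if i > 0 else "", element)
    for i, element in enumerate(stack.replace("]", "").split("["))
)
print(formatted)
# Root
#  PickBin
#   ApproachGrasp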
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/HelloManipulator/hello_manip.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample # This extension has franka related tasks and controllers as well from omni.isaac.franka import Franka from omni.isaac.core.objects import DynamicCuboid from omni.isaac.franka.controllers import PickPlaceController from omni.isaac.franka.tasks import PickPlace from omni.isaac.core.tasks import BaseTask import numpy as np class FrankaPlaying(BaseTask): #NOTE: we only cover here a subset of the task functions that are available, # checkout the base class for all the available functions to override. # ex: calculate_metrics, is_done..etc. def __init__(self, name): super().__init__(name=name, offset=None) self._goal_position = np.array([-0.3, -0.3, 0.0515 / 2.0]) self._task_achieved = False return # Here we setup all the assets that we care about in this task. def set_up_scene(self, scene): super().set_up_scene(scene) scene.add_default_ground_plane() self._cube = scene.add(DynamicCuboid(prim_path="/World/random_cube", name="fancy_cube", position=np.array([0.3, 0.3, 0.3]), scale=np.array([0.0515, 0.0515, 0.0515]), color=np.array([0, 0, 1.0]))) self._franka = scene.add(Franka(prim_path="/World/Fancy_Franka", name="fancy_franka")) return # Information exposed to solve the task is returned from the task through get_observations def get_observations(self): cube_position, _ = self._cube.get_world_pose() current_joint_positions = self._franka.get_joint_positions() observations = { self._franka.name: { "joint_positions": current_joint_positions, }, self._cube.name: { "position": cube_position, "goal_position": self._goal_position } } return observations # Called before each physics step, # for instance we can check here if the task was accomplished by # changing the color of the cube once its accomplished def pre_step(self, control_index, simulation_time): cube_position, _ = self._cube.get_world_pose() if not self._task_achieved and np.mean(np.abs(self._goal_position - cube_position)) < 0.02: # Visual Materials are applied by default to the cube # in this case the cube has a visual material of type # PreviewSurface, we can set its color once the target is reached. 
            self._cube.get_applied_visual_material().set_color(color=np.array([0, 1.0, 0]))
            self._task_achieved = True
        return

    # Called after each reset,
    # for instance we can always set the gripper to be opened at the beginning after each reset
    # also we can set the cube's color to be blue
    def post_reset(self):
        self._franka.gripper.set_joint_positions(self._franka.gripper.joint_opened_positions)
        self._cube.get_applied_visual_material().set_color(color=np.array([0, 0, 1.0]))
        self._task_achieved = False
        return


class HelloManip(BaseSample):
    def __init__(self) -> None:
        super().__init__()
        return

    def setup_scene(self):
        world = self.get_world()
        # We add the task to the world here
        world.add_task(FrankaPlaying(name="my_first_task"))
        return

    async def setup_post_load(self):
        self._world = self.get_world()
        # The world already called the setup_scene from the task (with first reset of the world)
        # so we can retrieve the task objects
        self._franka = self._world.scene.get_object("fancy_franka")
        self._controller = PickPlaceController(
            name="pick_place_controller",
            gripper=self._franka.gripper,
            robot_articulation=self._franka,
        )
        self._world.add_physics_callback("sim_step", callback_fn=self.physics_step)
        await self._world.play_async()
        return

    async def setup_post_reset(self):
        self._controller.reset()
        await self._world.play_async()
        return

    def physics_step(self, step_size):
        # Gets all the tasks observations
        current_observations = self._world.get_observations()
        actions = self._controller.forward(
            picking_position=current_observations["fancy_cube"]["position"],
            placing_position=current_observations["fancy_cube"]["goal_position"],
            current_joint_positions=current_observations["fancy_franka"]["joint_positions"],
        )
        self._franka.apply_action(actions)
        if self._controller.is_done():
            self._world.pause()
        return
5,289
Python
42.719008
99
0.629609
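The success test in FrankaPlaying.pre_step() declares the task done when the mean absolute per-axis error between the cube and its goal drops below 2 cm. A quick numeric check with illustrative values:

import numpy as np

goal = np.array([-0.3, -0.3, 0.0515 / 2.0])
cube = np.array([-0.29, -0.31, 0.03])   # illustrative measured cube position
error = np.mean(np.abs(goal - cube))
print(error, error < 0.02)              # ~0.008 -> True, so the cube turns green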
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/DingoLibrary/dingo_library.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample import numpy as np # Note: checkout the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.objects import DynamicCuboid from omni.isaac.sensor import Camera, RotatingLidarPhysX from omni.isaac.core import World import omni.graph.core as og import usdrt.Sdf import omni.isaac.core.utils.numpy.rotations as rot_utils from omni.isaac.core import SimulationContext from omni.physx.scripts import deformableUtils, physicsUtils from pxr import UsdGeom, Gf, UsdPhysics, Sdf, Gf, Tf, UsdLux from PIL import Image import carb import h5py import omni import cv2 class DingoLibrary(BaseSample): def __init__(self) -> None: super().__init__() carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) return def og_setup(self): domain_id = 0 try: # Twist OG maxLinearSpeed = 3.0 wheelDistance = 0.23632 wheelRadius = 0.049 jointNames = ["left_wheel_joint", "right_wheel_joint"] base_link_prim = "/World/dingo/base_link" og.Controller.edit( {"graph_path": "/ROS2DiffDrive", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("subscribeTwist", "omni.isaac.ros2_bridge.ROS2SubscribeTwist"), ("scaleToFromStage", "omni.isaac.core_nodes.OgnIsaacScaleToFromStageUnit"), ("breakLinVel", "omni.graph.nodes.BreakVector3"), ("breakAngVel", "omni.graph.nodes.BreakVector3"), ("diffController", "omni.isaac.wheeled_robots.DifferentialController"), ("artController", "omni.isaac.core_nodes.IsaacArticulationController"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", domain_id), ("diffController.inputs:maxLinearSpeed", maxLinearSpeed), ("diffController.inputs:wheelDistance", wheelDistance), ("diffController.inputs:wheelRadius", wheelRadius), ("artController.inputs:jointNames", jointNames), ("artController.inputs:usePath", False), ("artController.inputs:targetPrim", [usdrt.Sdf.Path(base_link_prim)]), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "subscribeTwist.inputs:execIn"), ("onPlaybackTick.outputs:tick", "artController.inputs:execIn"), ("context.outputs:context", "subscribeTwist.inputs:context"), ("subscribeTwist.outputs:execOut", "diffController.inputs:execIn"), ("subscribeTwist.outputs:angularVelocity", "breakAngVel.inputs:tuple"), ("subscribeTwist.outputs:linearVelocity", "scaleToFromStage.inputs:value"), ("scaleToFromStage.outputs:result", "breakLinVel.inputs:tuple"), ("breakAngVel.outputs:z", "diffController.inputs:angularVelocity"), ("breakLinVel.outputs:x", "diffController.inputs:linearVelocity"), # ("diffController.outputs:effortCommand", "artController.inputs:effortCommand"), # 
("diffController.outputs:positionCommand", "artController.inputs:positionCommand"), ("diffController.outputs:velocityCommand", "artController.inputs:velocityCommand"), ], }, ) # Static TF OG og.Controller.edit( {"graph_path": "/ROS2TF", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("publishTF", "omni.isaac.ros2_bridge.ROS2PublishTransformTree"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", domain_id), ("publishTF.inputs:parentPrim", [usdrt.Sdf.Path("/World/dingo/base_link")]), ("publishTF.inputs:targetPrims", [ usdrt.Sdf.Path("/World/dingo/left_wheel_link"), usdrt.Sdf.Path("/World/dingo/right_wheel_link"), usdrt.Sdf.Path("/World/dingo/base_link/velodyne_frame"), usdrt.Sdf.Path("/World/dingo/base_link/realsense_frame"), usdrt.Sdf.Path("/World/dingo/base_link/realsense_frame/realsense_left_stereo_frame"), usdrt.Sdf.Path("/World/dingo/base_link/realsense_frame/realsense_right_stereo_frame"), ]), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "publishTF.inputs:execIn"), ("context.outputs:context", "publishTF.inputs:context"), ("readSimTime.outputs:simulationTime", "publishTF.inputs:timeStamp"), ], }, ) # Odom TF OG og.Controller.edit( {"graph_path": "/ROS2Odom", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("computeOdom", "omni.isaac.core_nodes.IsaacComputeOdometry"), ("publishOdom", "omni.isaac.ros2_bridge.ROS2PublishOdometry"), ("publishRawTF", "omni.isaac.ros2_bridge.ROS2PublishRawTransformTree"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", domain_id), ("computeOdom.inputs:chassisPrim", [usdrt.Sdf.Path("/World/dingo")]), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "computeOdom.inputs:execIn"), ("onPlaybackTick.outputs:tick", "publishOdom.inputs:execIn"), ("onPlaybackTick.outputs:tick", "publishRawTF.inputs:execIn"), ("readSimTime.outputs:simulationTime", "publishOdom.inputs:timeStamp"), ("readSimTime.outputs:simulationTime", "publishRawTF.inputs:timeStamp"), ("context.outputs:context", "publishOdom.inputs:context"), ("context.outputs:context", "publishRawTF.inputs:context"), ("computeOdom.outputs:angularVelocity", "publishOdom.inputs:angularVelocity"), ("computeOdom.outputs:linearVelocity", "publishOdom.inputs:linearVelocity"), ("computeOdom.outputs:orientation", "publishOdom.inputs:orientation"), ("computeOdom.outputs:position", "publishOdom.inputs:position"), ("computeOdom.outputs:orientation", "publishRawTF.inputs:rotation"), ("computeOdom.outputs:position", "publishRawTF.inputs:translation"), ], }, ) # Clock OG og.Controller.edit( {"graph_path": "/ROS2Clock", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("publishClock", "omni.isaac.ros2_bridge.ROS2PublishClock"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", domain_id), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "publishClock.inputs:execIn"), ("readSimTime.outputs:simulationTime", "publishClock.inputs:timeStamp"), 
("context.outputs:context", "publishClock.inputs:context"), ], }, ) # 2D Lidar OG og.Controller.edit( {"graph_path": "/ROS2LaserScan", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("readLidar", "omni.isaac.range_sensor.IsaacReadLidarBeams"), ("publishLidar", "omni.isaac.ros2_bridge.ROS2PublishLaserScan"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", domain_id), ("publishLidar.inputs:frameId", "velodyne_frame"), ("readLidar.inputs:lidarPrim", [usdrt.Sdf.Path("/World/dingo/base_link/velodyne_frame/Lidar/laserscan_lidar")]), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "readLidar.inputs:execIn"), ("context.outputs:context", "publishLidar.inputs:context"), ("readSimTime.outputs:simulationTime", "publishLidar.inputs:timeStamp"), ("readLidar.outputs:execOut", "publishLidar.inputs:execIn"), ("readLidar.outputs:azimuthRange", "publishLidar.inputs:azimuthRange"), ("readLidar.outputs:depthRange", "publishLidar.inputs:depthRange"), ("readLidar.outputs:horizontalFov", "publishLidar.inputs:horizontalFov"), ("readLidar.outputs:horizontalResolution", "publishLidar.inputs:horizontalResolution"), ("readLidar.outputs:intensitiesData", "publishLidar.inputs:intensitiesData"), ("readLidar.outputs:linearDepthData", "publishLidar.inputs:linearDepthData"), ("readLidar.outputs:numCols", "publishLidar.inputs:numCols"), ("readLidar.outputs:numRows", "publishLidar.inputs:numRows"), ("readLidar.outputs:rotationRate", "publishLidar.inputs:rotationRate"), ], }, ) # 3D Lidar OG og.Controller.edit( {"graph_path": "/ROS2PointCloud", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("readLidar", "omni.isaac.range_sensor.IsaacReadLidarPointCloud"), ("publishLidar", "omni.isaac.ros2_bridge.ROS2PublishPointCloud"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", domain_id), ("publishLidar.inputs:frameId", "velodyne_frame"), ("readLidar.inputs:lidarPrim", [usdrt.Sdf.Path("/World/dingo/base_link/velodyne_frame/Lidar/pointcloud_lidar")]), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "readLidar.inputs:execIn"), ("context.outputs:context", "publishLidar.inputs:context"), ("readSimTime.outputs:simulationTime", "publishLidar.inputs:timeStamp"), ("readLidar.outputs:execOut", "publishLidar.inputs:execIn"), ("readLidar.outputs:data", "publishLidar.inputs:data"), ], }, ) # Left Camera OG og.Controller.edit( {"graph_path": "/ROS2CameraLeft", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("renderer", "omni.isaac.core_nodes.IsaacCreateRenderProduct"), ("RGBPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ("DepthPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ("CameraInfoPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", domain_id), ("renderer.inputs:cameraPrim", [usdrt.Sdf.Path("/World/dingo/base_link/realsense_frame/realsense_left_stereo_frame/realsense_left_stereo_camera")]), ("RGBPublish.inputs:topicName", "/left/rgb"), ("RGBPublish.inputs:type", "rgb"), 
("RGBPublish.inputs:resetSimulationTimeOnStop", True), ("RGBPublish.inputs:frameId", "realsense_left_stereo_frame"), ("DepthPublish.inputs:topicName", "/left/depth"), ("DepthPublish.inputs:type", "depth"), ("DepthPublish.inputs:resetSimulationTimeOnStop", True), ("DepthPublish.inputs:frameId", "realsense_left_stereo_frame"), ("CameraInfoPublish.inputs:topicName", "/left/camera_info"), ("CameraInfoPublish.inputs:type", "camera_info"), ("CameraInfoPublish.inputs:resetSimulationTimeOnStop", True), ("CameraInfoPublish.inputs:frameId", "realsense_left_stereo_frame"), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "renderer.inputs:execIn"), ("context.outputs:context", "RGBPublish.inputs:context"), ("context.outputs:context", "DepthPublish.inputs:context"), ("context.outputs:context", "CameraInfoPublish.inputs:context"), ("renderer.outputs:execOut", "RGBPublish.inputs:execIn"), ("renderer.outputs:execOut", "DepthPublish.inputs:execIn"), ("renderer.outputs:execOut", "CameraInfoPublish.inputs:execIn"), ("renderer.outputs:renderProductPath", "RGBPublish.inputs:renderProductPath"), ("renderer.outputs:renderProductPath", "DepthPublish.inputs:renderProductPath"), ("renderer.outputs:renderProductPath", "CameraInfoPublish.inputs:renderProductPath"), ], }, ) # Right Camera OG og.Controller.edit( {"graph_path": "/ROS2CameraRight", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("renderer", "omni.isaac.core_nodes.IsaacCreateRenderProduct"), ("RGBPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ("DepthPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ("CameraInfoPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", domain_id), ("renderer.inputs:cameraPrim", [usdrt.Sdf.Path("/World/dingo/base_link/realsense_frame/realsense_right_stereo_frame/realsense_right_stereo_camera")]), ("RGBPublish.inputs:topicName", "/right/rgb"), ("RGBPublish.inputs:type", "rgb"), ("RGBPublish.inputs:resetSimulationTimeOnStop", True), ("RGBPublish.inputs:frameId", "realsense_right_stereo_frame"), ("DepthPublish.inputs:topicName", "/right/depth"), ("DepthPublish.inputs:type", "depth"), ("DepthPublish.inputs:resetSimulationTimeOnStop", True), ("DepthPublish.inputs:frameId", "realsense_right_stereo_frame"), ("CameraInfoPublish.inputs:topicName", "/right/camera_info"), ("CameraInfoPublish.inputs:type", "camera_info"), ("CameraInfoPublish.inputs:resetSimulationTimeOnStop", True), ("CameraInfoPublish.inputs:frameId", "realsense_right_stereo_frame"), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "renderer.inputs:execIn"), ("context.outputs:context", "RGBPublish.inputs:context"), ("context.outputs:context", "DepthPublish.inputs:context"), ("context.outputs:context", "CameraInfoPublish.inputs:context"), ("renderer.outputs:execOut", "RGBPublish.inputs:execIn"), ("renderer.outputs:execOut", "DepthPublish.inputs:execIn"), ("renderer.outputs:execOut", "CameraInfoPublish.inputs:execIn"), ("renderer.outputs:renderProductPath", "RGBPublish.inputs:renderProductPath"), ("renderer.outputs:renderProductPath", "DepthPublish.inputs:renderProductPath"), ("renderer.outputs:renderProductPath", "CameraInfoPublish.inputs:renderProductPath"), ], }, ) except Exception as e: print(e) def add_background(self): bg_path = self._server_root + "/Projects/RBROS2/LibraryNoRoof/Library_No_Roof_Collide_Light.usd" add_reference_to_stage( 
usd_path=bg_path, prim_path=f"/World/Library_No_Roof", ) bg_mesh = UsdGeom.Mesh.Get(self._stage, "/World/Library_No_Roof") # physicsUtils.set_or_add_translate_op(bg_mesh, translate=Gf.Vec3f(0.0, 0.0, 0.0)) # physicsUtils.set_or_add_orient_op(bg_mesh, orient=Gf.Quatf(-0.5, -0.5, -0.5, -0.5)) physicsUtils.set_or_add_scale_op(bg_mesh, scale=Gf.Vec3f(0.01, 0.01, 0.01)) def add_dingo(self): dingo_usd_path = self._server_root + "/NVIDIA/Assets/Isaac/2023.1.1/Isaac/Robots/Clearpath/Dingo/dingo.usd" add_reference_to_stage( usd_path=dingo_usd_path, prim_path=f"/World/dingo", ) dingo_mesh = UsdGeom.Mesh.Get(self._stage, "/World/dingo") physicsUtils.set_or_add_translate_op(dingo_mesh, translate=Gf.Vec3f(0.0, 0.0, 0.02)) # physicsUtils.set_or_add_orient_op(dingo_mesh, orient=Gf.Quatf(-0.5, -0.5, -0.5, -0.5)) # physicsUtils.set_or_add_scale_op(dingo_mesh, scale=Gf.Vec3f(0.001, 0.001, 0.001)) # Dingo Left Cam has a translation Error left_cam = UsdGeom.Mesh.Get(self._stage, "/World/dingo/base_link/realsense_frame/realsense_left_stereo_frame/realsense_left_stereo_camera") physicsUtils.set_or_add_translate_op(left_cam, translate=Gf.Vec3f(0.0, 0.0, 0.0)) def lidar_setup(self): self._2d_lidar = RotatingLidarPhysX( prim_path="/World/dingo/base_link/velodyne_frame/Lidar/laserscan_lidar", name="laserscan_lidar", translation=np.array([0.0, 0.0, 0.0]), ) self._2d_lidar.set_valid_range([0.4, 10.0]) self._2d_lidar.add_depth_data_to_frame() self._2d_lidar.add_point_cloud_data_to_frame() self._2d_lidar.enable_visualization(high_lod=False, draw_points=False, draw_lines=False) self._3d_lidar = RotatingLidarPhysX( prim_path="/World/dingo/base_link/velodyne_frame/Lidar/pointcloud_lidar", name="pointcloud_lidar", translation=np.array([0.0, 0.0, 0.0]), valid_range=(0.4, 10.0), ) self._3d_lidar.set_resolution([0.4, 2.0]) self._3d_lidar.add_depth_data_to_frame() self._3d_lidar.add_point_cloud_data_to_frame() self._3d_lidar.enable_visualization(high_lod=True, draw_points=False, draw_lines=False) def add_light(self): sphereLight1 = UsdLux.SphereLight.Define(self._stage, Sdf.Path("/World/SphereLight1")) sphereLight1.CreateIntensityAttr(100000) sphereLight1.CreateRadiusAttr(100.0) sphereLight1.AddTranslateOp().Set(Gf.Vec3f(885.0, 657.0, 226.0)) def setup_scene(self): self._world = self.get_world() self._stage = omni.usd.get_context().get_stage() self.simulation_context = SimulationContext() self.add_background() self.add_dingo() self.lidar_setup() self.og_setup() return async def setup_post_load(self): self._world.add_physics_callback("sim_step", callback_fn=self.physics_callback) #callback names have to be unique return def physics_callback(self, step_size): return # async def setup_pre_reset(self): # return # async def setup_post_reset(self): # return def world_cleanup(self): self._world.pause() return
23,420
Python
54.368794
174
0.55205
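The /ROS2DiffDrive graph above subscribes to a Twist topic and converts it into wheel velocity commands. A companion teleop sketch, assuming a sourced ROS 2 environment and that the ROS2SubscribeTwist node listens on its default cmd_vel topic (no topic name is set in the graph):

import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Twist

class DingoTeleop(Node):
    def __init__(self):
        super().__init__("dingo_teleop")
        self._pub = self.create_publisher(Twist, "cmd_vel", 10)
        self.create_timer(0.1, self._tick)  # publish at 10 Hz

    def _tick(self):
        msg = Twist()
        msg.linear.x = 0.3   # m/s forward
        msg.angular.z = 0.2  # rad/s yaw
        self._pub.publish(msg)

def main():
    rclpy.init()
    node = DingoTeleop()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()

if __name__ == "__main__":
    main()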
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorScatter2D/replicator_basic.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.stage import open_stage import omni.replicator.core as rep import carb.settings import numpy as np from os.path import expanduser import datetime now = datetime.datetime.now() class Scatter2D(BaseSample): def __init__(self) -> None: super().__init__() self.assets_root_path = get_assets_root_path() self._nucleus_server_path = "omniverse://localhost/NVIDIA/" # Enable scripts carb.settings.get_settings().set_bool("/app/omni.graph.scriptnode/opt_in", True) # Disable capture on play and async rendering carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False) carb.settings.get_settings().set("/omni/replicator/asyncRendering", False) carb.settings.get_settings().set("/app/asyncRendering", False) self.spheres = None self._sim_step = 0 self.collision_objects = [] # Replicator Writerdir now_str = now.strftime("%Y-%m-%d_%H:%M:%S") self._out_dir = str(expanduser("~") + "/Documents/scatter2D_sample_" + now_str) return def randomize_spheres(self): self.spheres = rep.create.sphere(scale=0.5, count=5) with self.spheres: rep.randomizer.scatter_2d(self.collision_objects, check_for_collisions=True) def setup_scene(self): world = self.get_world() world.scene.add_default_ground_plane() self.cam = rep.create.camera(position=(0, 0, 8), look_at=(0, 0, 0)) self.rp = rep.create.render_product(self.cam, resolution=(1024, 1024)) self.plane = rep.create.plane(scale=4, position = (0, 0, 0.1), rotation=(0, 0, 0), visible=True) self.collision_objects.append(self.plane) rep.randomizer.register(self.randomize_spheres) return async def setup_post_load(self): with rep.trigger.on_frame(num_frames=20): rep.randomizer.randomize_spheres() # Create a writer and apply the augmentations to its corresponding annotators self._writer = rep.WriterRegistry.get("BasicWriter") print(f"Writing data to: {self._out_dir}") self._writer.initialize( output_dir=self._out_dir, rgb=True, bounding_box_2d_tight=True, ) # Attach render product to writer self._writer.attach([self.rp]) return
2,958
Python
33.811764
104
0.664976
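After the 20 randomized frames are captured, the BasicWriter leaves RGB images and 2D bounding-box annotations in the timestamped output folder. A read-back sketch; the file naming patterns (rgb_*.png, bounding_box_2d_tight_*.npy) are an assumption about BasicWriter's defaults, so adjust the globs to whatever actually appears on disk:

import glob
import os
import numpy as np
from PIL import Image

home_docs = os.path.expanduser("~/Documents")
run_dir = sorted(glob.glob(os.path.join(home_docs, "scatter2D_sample_*")))[-1]

rgb_files = sorted(glob.glob(os.path.join(run_dir, "rgb_*.png")))
bbox_files = sorted(glob.glob(os.path.join(run_dir, "bounding_box_2d_tight_*.npy")))

img = np.asarray(Image.open(rgb_files[0]))
boxes = np.load(bbox_files[0], allow_pickle=True)
print(img.shape, len(boxes))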
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/FrankaCabinet/ros2_twist_sub.py
import numpy as np # ROS2 imports import rclpy from rclpy.node import Node from std_msgs.msg import Float32 from geometry_msgs.msg import Twist, PoseStamped def quaternion_multiply(q1, q2): w1, x1, y1, z1 = q1 w2, x2, y2, z2 = q2 w = w1*w2 - x1*x2 - y1*y2 - z1*z2 x = w1*x2 + x1*w2 + y1*z2 - z1*y2 y = w1*y2 - x1*z2 + y1*w2 + z1*x2 z = w1*z2 + x1*y2 - y1*x2 + z1*w2 return np.array([w, x, y, z]) class QuestROS2Sub(Node): def __init__(self): super().__init__('questros2_subscriber') queue_size = 10 # Queue Size self._rh_twist_sub = self.create_subscription( Twist, 'q2r_right_hand_twist', self.twist_sub_callback, queue_size ) self._rh_pose_sub = self.create_subscription( PoseStamped, 'q2r_right_hand_pose', self.pose_sub_callback, queue_size ) self._rh_index_btn_sub = self.create_subscription( Float32, 'right_press_index', self.btn_sub_callback, queue_size ) self._cur_twist = Twist() self._cur_pose = PoseStamped() self._btn_press = False def twist_sub_callback(self, msg): # self.get_logger().info(f""" # x : {msg.linear.x:.3f} / y : {msg.linear.y:.3f} / z : {msg.linear.z:.3f} # r : {msg.angular.x:.3f} / p : {msg.angular.y:.3f} / y : {msg.angular.z:.3f} # """) self._cur_twist = msg def pose_sub_callback(self, msg): # self.get_logger().info(f""" # x : {msg.pose.position.x:.3f} / y : {msg.pose.position.y:.3f} / z : {msg.pose.position.z:.3f} # x : {msg.pose.orientation.x:.3f} / y : {msg.pose.orientation.y:.3f} / z : {msg.pose.orientation.z:.3f} / w : {msg.pose.orientation.w:.3f} # """) self._cur_pose = msg def btn_sub_callback(self, msg): if msg.data == 1.0: self._btn_press = True else: self._btn_press = False # print(f"msg.data: {msg.data} / self._btn_press: {self._btn_press}") def get_twist(self): # return self._cur_twist lin_x, lin_y, lin_z = self._cur_twist.linear.x, self._cur_twist.linear.y, self._cur_twist.linear.z ang_x, ang_y, ang_z = self._cur_twist.angular.x, self._cur_twist.angular.y, self._cur_twist.angular.z lin_x *= 5 lin_y *= 2 lin_z *= 2 return [ lin_x, lin_y, lin_z, 0.0, 0.0, 0.0] def get_pose(self, z_offset, q_offset): position = np.array([ self._cur_pose.pose.position.x * 2, self._cur_pose.pose.position.y * 2, self._cur_pose.pose.position.z * 2 + z_offset ]) orientation = np.array([ self._cur_pose.pose.orientation.x, self._cur_pose.pose.orientation.y, self._cur_pose.pose.orientation.z, self._cur_pose.pose.orientation.w ]) orientation = quaternion_multiply(q_offset, orientation) isaac_orientation = np.array([ orientation[3], orientation[0], orientation[1], orientation[2], ]) return position, isaac_orientation def get_right_btn(self): return self._btn_press def main(args=None): """Do enter into this main function first.""" rclpy.init(args=args) quest_ros2_sub = QuestROS2Sub() rclpy.spin(quest_ros2_sub) quest_ros2_sub.destroy_node() rclpy.shutdown() if __name__ == '__main__': """main function""" main()
3,514
Python
28.291666
151
0.550939
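quaternion_multiply() above composes two quaternions in (w, x, y, z) order. A quick numeric sanity check: chaining two 90-degree rotations about Z should yield a single 180-degree rotation about Z.

import numpy as np

def quaternion_multiply(q1, q2):
    w1, x1, y1, z1 = q1
    w2, x2, y2, z2 = q2
    return np.array([
        w1*w2 - x1*x2 - y1*y2 - z1*z2,
        w1*x2 + x1*w2 + y1*z2 - z1*y2,
        w1*y2 - x1*z2 + y1*w2 + z1*x2,
        w1*z2 + x1*y2 - y1*x2 + z1*w2,
    ])

q_z90 = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
print(np.round(quaternion_multiply(q_z90, q_z90), 6))  # -> [0. 0. 0. 1.] (180 deg about Z)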
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/FrankaCabinet/franka_cabinet.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample import numpy as np from omni.isaac.core.materials.physics_material import PhysicsMaterial from omni.isaac.core.prims.geometry_prim import GeometryPrim # Note: checkout the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html from pxr import UsdGeom, Gf, UsdPhysics, Sdf, Gf, Tf, UsdLux, UsdShade from omni.isaac.franka.controllers.rmpflow_controller import RMPFlowController from omni.isaac.franka import Franka from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root import carb import omni import rclpy import asyncio from .ros2_twist_sub import QuestROS2Sub def addObjectsGeom(scene, name, scale, ini_pos, collision=None, mass=None, orientation=None): scene.add(GeometryPrim(prim_path=f"/World/{name}", name=f"{name}_ref_geom", collision=True)) geom = scene.get_object(f"{name}_ref_geom") if orientation is None: # Usually - (x, y, z, w) # But in Isaac Sim - (w, x, y, z) orientation = np.array([1.0, 0.0, 0.0, 0.0]) geom.set_local_scale(scale) geom.set_world_pose(position=ini_pos) geom.set_default_state(position=ini_pos, orientation=orientation) geom.set_collision_enabled(False) if collision is not None: geom.set_collision_enabled(True) geom.set_collision_approximation(collision) if mass is not None: massAPI = UsdPhysics.MassAPI.Apply(geom.prim.GetPrim()) massAPI.CreateMassAttr().Set(mass) return geom class FrankaCabinet(BaseSample): def __init__(self) -> None: super().__init__() carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._isaac_assets_path = get_assets_root_path() self.CUBE_URL = self._isaac_assets_path + "/Isaac/Props/Blocks/nvidia_cube.usd" self.CABINET_URL = self._server_root + "/Projects/RBROS2/Quest2ROS/sektin_cabinet_light.usd" self._quest_ros2_sub_node = None self._controller = None self._articulation_controller = None return def __del__(self): if self._quest_ros2_sub_node is not None: self._quest_ros2_sub_node.destroy_node() return async def ros_loop(self): while rclpy.ok(): rclpy.spin_once(self._quest_ros2_sub_node, timeout_sec=0) await asyncio.sleep(1e-4) def add_light(self): sphereLight1 = UsdLux.SphereLight.Define(self._stage, Sdf.Path("/World/SphereLight")) sphereLight1.CreateIntensityAttr(10000) sphereLight1.CreateRadiusAttr(0.1) sphereLight1.AddTranslateOp().Set(Gf.Vec3f(1.0, 0.0, 1.0)) def add_cabinet(self): add_reference_to_stage(usd_path=self.CABINET_URL, prim_path=f"/World/Cabinet") self._cabinet_geom = addObjectsGeom( self._world.scene, "Cabinet", scale=np.array([1.0, 1.0, 1.0]), ini_pos=np.array([0.9, 0.4, 0.4]), collision=None, mass=None, orientation=np.array([0.0, 0.0, 0.0, 1.0]) ) def add_franka(self): self._franka = self._world.scene.add( Franka( prim_path="/World/Fancy_Franka", name="fancy_franka" ) ) def setup_scene(self): self._world = self.get_world() self._stage = 
omni.usd.get_context().get_stage() self._world.scene.add_default_ground_plane() self.add_franka() self.add_light() self.add_cabinet() return def add_franka_material(self): self._franka_finger_physics_material = PhysicsMaterial( prim_path="/World/PhysicsMaterials/FrankaFingerMaterial", name="franka_finger_material_physics", static_friction=0.9, dynamic_friction=0.9, ) franka_left_finger = self._world.stage.GetPrimAtPath( "/World/Fancy_Franka/panda_leftfinger/geometry/panda_leftfinger" ) x = UsdShade.MaterialBindingAPI.Apply(franka_left_finger) x.Bind( self._franka_finger_physics_material.material, bindingStrength="weakerThanDescendants", materialPurpose="physics", ) franka_right_finger = self._world.stage.GetPrimAtPath( "/World/Fancy_Franka/panda_rightfinger/geometry/panda_rightfinger" ) x2 = UsdShade.MaterialBindingAPI.Apply(franka_right_finger) x2.Bind( self._franka_finger_physics_material.material, bindingStrength="weakerThanDescendants", materialPurpose="physics", ) async def setup_post_load(self): self._world = self.get_world() self._my_franka = self._world.scene.get_object("fancy_franka") self._my_gripper = self._my_franka.gripper self.add_franka_material() # RMPFlow controller self._controller = RMPFlowController( name="target_follower_controller", robot_articulation=self._my_franka ) # ROS 2 init rclpy.init(args=None) self._quest_ros2_sub_node = QuestROS2Sub() self._articulation_controller = self._my_franka.get_articulation_controller() self._world.add_physics_callback("sim_step", callback_fn=self.physics_callback) await self.ros_loop() return def physics_callback(self, step_size): ee_position, ee_orientation = self._quest_ros2_sub_node.get_pose( z_offset=0.8, # z -180 > 0, 0, -1, 0 # q_offset=np.array([0.0, 0.0, 0.0, 1.0]) # x 90 / z -180 # q_offset=np.array([0, 0.7071068, -0.7071068, 0 ]) # y 90 / z -180 q_offset=np.array([-0.7071068, 0, -0.7071068, 0 ]) ) gripper_command = self._quest_ros2_sub_node.get_right_btn() if np.array_equal( ee_position, np.array([0.0, 0.0, 0.0]) ): ee_position = np.array([0.4, 0, 0.5]) if gripper_command: self._my_gripper.close() else: self._my_gripper.open() # RMPFlow controller actions = self._controller.forward( target_end_effector_position=ee_position, # target_end_effector_orientation=ee_orientation, # w x y z => x y z w # 0 0 1 0 => 0 1 0 0 # 0 0 1 0 => 0 1 0 0 target_end_effector_orientation=np.array([ 0.5, -0.5, 0.5, -0.5 ]), ) self._articulation_controller.apply_action(actions) return async def setup_pre_reset(self): world = self.get_world() if world.physics_callback_exists("sim_step"): world.remove_physics_callback("sim_step") self._controller.reset() return async def setup_post_reset(self): self._controller.reset() await self._world.play_async() return def world_cleanup(self): self._world.pause() self._controller = None return
7,808
Python
34.175676
116
0.616931
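physics_callback() above boils down to a small per-step decision: fall back to a safe target when no tracking pose has arrived yet, and map the controller trigger to the gripper. A condensed, hypothetical sketch of that logic (gripper here stands for any object exposing open()/close(), as the Franka gripper does):

import numpy as np

def teleop_step(ee_position, button_pressed, gripper):
    # No tracking data yet: the subscriber still reports an all-zero position.
    if np.array_equal(ee_position, np.zeros(3)):
        ee_position = np.array([0.4, 0.0, 0.5])  # default reachable target
    # The trigger button maps directly to gripper state.
    if button_pressed:
        gripper.close()
    else:
        gripper.open()
    return ee_position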
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/WheeledRobotsKaya/kaya_robot.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.wheeled_robots.robots import WheeledRobot from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.isaac.wheeled_robots.controllers.holonomic_controller import HolonomicController from omni.isaac.wheeled_robots.robots.holonomic_robot_usd_setup import HolonomicRobotUsdSetup import numpy as np class KayaRobot(BaseSample): def __init__(self) -> None: super().__init__() return def setup_scene(self): world = self.get_world() world.scene.add_default_ground_plane() assets_root_path = get_assets_root_path() kaya_asset_path = assets_root_path + "/Isaac/Robots/Kaya/kaya.usd" self._wheeled_robot = world.scene.add( WheeledRobot( prim_path="/World/Kaya", name="my_kaya", wheel_dof_names=["axle_0_joint", "axle_1_joint", "axle_2_joint"], create_robot=True, usd_path=kaya_asset_path, position=np.array([0, 0.0, 0.02]), orientation=np.array([1.0, 0.0, 0.0, 0.0]), ) ) self._save_count = 0 return async def setup_post_load(self): self._world = self.get_world() kaya_setup = HolonomicRobotUsdSetup( robot_prim_path=self._wheeled_robot.prim_path, com_prim_path="/World/Kaya/base_link/control_offset" ) ( wheel_radius, wheel_positions, wheel_orientations, mecanum_angles, wheel_axis, up_axis, ) = kaya_setup.get_holonomic_controller_params() self._holonomic_controller = HolonomicController( name="holonomic_controller", wheel_radius=wheel_radius, wheel_positions=wheel_positions, wheel_orientations=wheel_orientations, mecanum_angles=mecanum_angles, wheel_axis=wheel_axis, up_axis=up_axis, ) print("wheel_radius : ", wheel_radius) print("wheel_positions : ", wheel_positions) print("wheel_orientations : ", wheel_orientations) print("mecanum_angles : ", mecanum_angles) print("wheel_axis : ", wheel_axis) print("up_axis : ", up_axis) self._holonomic_controller.reset() self._world.add_physics_callback("sending_actions", callback_fn=self.send_robot_actions) return def send_robot_actions(self, step_size): self._save_count += 1 wheel_action = None if self._save_count >= 0 and self._save_count < 300: wheel_action = self._holonomic_controller.forward(command=[0.5, 0.0, 0.0]) elif self._save_count >= 300 and self._save_count < 600: wheel_action = self._holonomic_controller.forward(command=[-0.5, 0.0, 0.0]) elif self._save_count >= 600 and self._save_count < 900: wheel_action = self._holonomic_controller.forward(command=[0.0, 0.5, 0.0]) elif self._save_count >= 900 and self._save_count < 1200: wheel_action = self._holonomic_controller.forward(command=[0.0, -0.5, 0.0]) elif self._save_count >= 1200 and self._save_count < 1500: wheel_action = self._holonomic_controller.forward(command=[0.0, 0.0, 0.2]) elif self._save_count >= 1500 and self._save_count < 1800: wheel_action = self._holonomic_controller.forward(command=[0.0, 0.0, -0.2]) else: self._save_count = 0 self._wheeled_robot.apply_wheel_actions(wheel_action) return
4,150
Python
37.435185
111
0.62241
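send_robot_actions() above cycles the Kaya through six motion phases, each lasting 300 physics steps, with a holonomic command of the form [v_x, v_y, omega_z]. The schedule, condensed into a standalone helper for clarity:

def kaya_command_for(step):
    schedule = [
        ((0, 300), [0.5, 0.0, 0.0]),      # forward
        ((300, 600), [-0.5, 0.0, 0.0]),   # backward
        ((600, 900), [0.0, 0.5, 0.0]),    # strafe one way
        ((900, 1200), [0.0, -0.5, 0.0]),  # strafe back
        ((1200, 1500), [0.0, 0.0, 0.2]),  # rotate one way
        ((1500, 1800), [0.0, 0.0, -0.2]), # rotate back
    ]
    for (lo, hi), cmd in schedule:
        if lo <= step < hi:
            return cmd
    return None  # caller resets its counter at this point

print(kaya_command_for(450))  # -> [-0.5, 0.0, 0.0]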
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/LimoDiffROS2/limo_diff_drive.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.core.utils.stage import add_reference_to_stage from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from pxr import UsdGeom, Gf, UsdPhysics, Sdf, Gf, Tf, UsdLux from omni.physx.scripts import deformableUtils, physicsUtils import omni.graph.core as og import numpy as np import usdrt.Sdf import omni import carb class LimoDiffDrive(BaseSample): def __init__(self) -> None: super().__init__() carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self.TRACK_PATH = self._server_root + "/Projects/WegoLimo/LimoTrack/LIMO_simulation_table.usd" self.ROBOT_PATH = self._server_root + "/Projects/WegoLimo/Limo/limo_diff_thin.usd" self.LIGHT_PATH = self._server_root + "/Projects/WegoLimo/LimoTrack/Traffic_Light.usdz" self.STOP_PATH = self._server_root + "/Projects/WegoLimo/LimoTrack/Stop_Sign.usdz" self.HYDRANT_PATH = self._server_root + "/Projects/WegoLimo/LimoTrack/Hydrant.usdz" self.PARK_BENCH_PATH = self._server_root + "/Projects/WegoLimo/LimoTrack/Park_Bench.usdz" # omniverse://localhost/Projects/WegoLimo/LimoTrack/Traffic_Light.usdz self._domain_id = 30 self._maxLinearSpeed = 1e6 self._wheelDistance = 0.43 self._wheelRadius = 0.045 self._front_jointNames = ["rear_left_wheel", "rear_right_wheel"] self._rear_jointNames = ["front_left_wheel", "front_right_wheel"] self._contorl_targetPrim = "/World/Limo/base_link" self._odom_targetPrim = "/World/Limo/base_footprint" self._cameraPath = "/World/Limo/depth_link/rgb_camera" return def og_setup(self): try: # OG reference : https://docs.omniverse.nvidia.com/isaacsim/latest/ros2_tutorials/tutorial_ros2_drive_turtlebot.html og.Controller.edit( {"graph_path": "/ROS2DiffDrive", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("subscribeTwist", "omni.isaac.ros2_bridge.ROS2SubscribeTwist"), ("scaleToFromStage", "omni.isaac.core_nodes.OgnIsaacScaleToFromStageUnit"), ("breakLinVel", "omni.graph.nodes.BreakVector3"), ("breakAngVel", "omni.graph.nodes.BreakVector3"), ("diffController", "omni.isaac.wheeled_robots.DifferentialController"), ("artControllerRear", "omni.isaac.core_nodes.IsaacArticulationController"), ("artControllerFront", "omni.isaac.core_nodes.IsaacArticulationController"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", self._domain_id), ("diffController.inputs:maxLinearSpeed", self._maxLinearSpeed), ("diffController.inputs:wheelDistance", self._wheelDistance), ("diffController.inputs:wheelRadius", self._wheelRadius), ("artControllerRear.inputs:jointNames", self._front_jointNames), ("artControllerRear.inputs:targetPrim", [usdrt.Sdf.Path(self._contorl_targetPrim)]), ("artControllerRear.inputs:usePath", False), ("artControllerFront.inputs:jointNames", self._rear_jointNames), ("artControllerFront.inputs:targetPrim", 
[usdrt.Sdf.Path(self._contorl_targetPrim)]), ("artControllerFront.inputs:usePath", False), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "subscribeTwist.inputs:execIn"), ("onPlaybackTick.outputs:tick", "artControllerRear.inputs:execIn"), ("onPlaybackTick.outputs:tick", "artControllerFront.inputs:execIn"), ("context.outputs:context", "subscribeTwist.inputs:context"), ("subscribeTwist.outputs:execOut", "diffController.inputs:execIn"), ("subscribeTwist.outputs:angularVelocity", "breakAngVel.inputs:tuple"), ("subscribeTwist.outputs:linearVelocity", "scaleToFromStage.inputs:value"), ("scaleToFromStage.outputs:result", "breakLinVel.inputs:tuple"), ("breakAngVel.outputs:z", "diffController.inputs:angularVelocity"), ("breakLinVel.outputs:x", "diffController.inputs:linearVelocity"), ("diffController.outputs:velocityCommand", "artControllerRear.inputs:velocityCommand"), ("diffController.outputs:velocityCommand", "artControllerFront.inputs:velocityCommand"), ], }, ) # OG reference : https://docs.omniverse.nvidia.com/isaacsim/latest/ros2_tutorials/tutorial_ros2_tf.html og.Controller.edit( {"graph_path": "/ROS2Odom", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("computeOdom", "omni.isaac.core_nodes.IsaacComputeOdometry"), ("publishOdom", "omni.isaac.ros2_bridge.ROS2PublishOdometry"), ("publishRawTF", "omni.isaac.ros2_bridge.ROS2PublishRawTransformTree"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", self._domain_id), ("computeOdom.inputs:chassisPrim", [usdrt.Sdf.Path(self._odom_targetPrim)]), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "computeOdom.inputs:execIn"), ("onPlaybackTick.outputs:tick", "publishOdom.inputs:execIn"), ("onPlaybackTick.outputs:tick", "publishRawTF.inputs:execIn"), ("readSimTime.outputs:simulationTime", "publishOdom.inputs:timeStamp"), ("readSimTime.outputs:simulationTime", "publishRawTF.inputs:timeStamp"), ("context.outputs:context", "publishOdom.inputs:context"), ("context.outputs:context", "publishRawTF.inputs:context"), ("computeOdom.outputs:angularVelocity", "publishOdom.inputs:angularVelocity"), ("computeOdom.outputs:linearVelocity", "publishOdom.inputs:linearVelocity"), ("computeOdom.outputs:orientation", "publishOdom.inputs:orientation"), ("computeOdom.outputs:position", "publishOdom.inputs:position"), ("computeOdom.outputs:orientation", "publishRawTF.inputs:rotation"), ("computeOdom.outputs:position", "publishRawTF.inputs:translation"), ], }, ) # Right Camera OG og.Controller.edit( {"graph_path": "/ROS2Camera", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("onPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("context", "omni.isaac.ros2_bridge.ROS2Context"), ("renderer", "omni.isaac.core_nodes.IsaacCreateRenderProduct"), ("RGBPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ("DepthPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ("CameraInfoPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ], og.Controller.Keys.SET_VALUES: [ ("context.inputs:domain_id", self._domain_id), ("renderer.inputs:cameraPrim", [usdrt.Sdf.Path(self._cameraPath)]), ("RGBPublish.inputs:topicName", "/limo/rgb"), ("RGBPublish.inputs:type", "rgb"), ("RGBPublish.inputs:resetSimulationTimeOnStop", True), ("RGBPublish.inputs:frameId", "limo_rgbd_frame"), ("DepthPublish.inputs:topicName", "/limo/depth"), 
("DepthPublish.inputs:type", "depth"), ("DepthPublish.inputs:resetSimulationTimeOnStop", True), ("DepthPublish.inputs:frameId", "limo_rgbd_frame"), ("CameraInfoPublish.inputs:topicName", "/limo/camera_info"), ("CameraInfoPublish.inputs:type", "camera_info"), ("CameraInfoPublish.inputs:resetSimulationTimeOnStop", True), ("CameraInfoPublish.inputs:frameId", "limo_rgbd_frame"), ], og.Controller.Keys.CONNECT: [ ("onPlaybackTick.outputs:tick", "renderer.inputs:execIn"), ("context.outputs:context", "RGBPublish.inputs:context"), ("context.outputs:context", "DepthPublish.inputs:context"), ("context.outputs:context", "CameraInfoPublish.inputs:context"), ("renderer.outputs:execOut", "RGBPublish.inputs:execIn"), ("renderer.outputs:execOut", "DepthPublish.inputs:execIn"), ("renderer.outputs:execOut", "CameraInfoPublish.inputs:execIn"), ("renderer.outputs:renderProductPath", "RGBPublish.inputs:renderProductPath"), ("renderer.outputs:renderProductPath", "DepthPublish.inputs:renderProductPath"), ("renderer.outputs:renderProductPath", "CameraInfoPublish.inputs:renderProductPath"), ], }, ) except Exception as e: print(e) def add_background(self): add_reference_to_stage(usd_path=self.TRACK_PATH, prim_path="/World/LimoTrack") bg_mesh = UsdGeom.Mesh.Get(omni.usd.get_context().get_stage(), "/World/LimoTrack") physicsUtils.set_or_add_scale_op(bg_mesh, scale=Gf.Vec3f(0.01, 0.01, 0.01)) def add_robot(self): add_reference_to_stage(usd_path=self.ROBOT_PATH, prim_path="/World/Limo") limo_mesh = UsdGeom.Mesh.Get(omni.usd.get_context().get_stage(), "/World/Limo") physicsUtils.set_or_add_translate_op(limo_mesh, translate=Gf.Vec3f(0.0, -0.18, 0.0)) def add_light(self): distantLight1 = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/distantLight1")) distantLight1.CreateIntensityAttr(3000) distantLight1.AddTranslateOp().Set(Gf.Vec3f(0.0, 0.0, 0.0)) def add_objects(self): # Add Traffic Light add_reference_to_stage(usd_path=self.LIGHT_PATH, prim_path="/World/TrafficLight") light_mesh = UsdGeom.Mesh.Get(omni.usd.get_context().get_stage(), "/World/TrafficLight") physicsUtils.set_or_add_translate_op(light_mesh, translate=Gf.Vec3f(0.8, 0.0, 0.0)) physicsUtils.set_or_add_orient_op(light_mesh, orient=Gf.Quatf( # w, x, y, z 0.5, 0.5, -0.5, -0.5 )) physicsUtils.set_or_add_scale_op(light_mesh, scale=Gf.Vec3f(0.001, 0.001, 0.001)) # Add Stop Sign add_reference_to_stage(usd_path=self.STOP_PATH, prim_path="/World/StopSign") stop_mesh = UsdGeom.Mesh.Get(omni.usd.get_context().get_stage(), "/World/StopSign") physicsUtils.set_or_add_translate_op(stop_mesh, translate=Gf.Vec3f(0.8, -0.1, 0.0)) physicsUtils.set_or_add_orient_op(stop_mesh, orient=Gf.Quatf( 0.5, 0.5, -0.5, -0.5 )) physicsUtils.set_or_add_scale_op(stop_mesh, scale=Gf.Vec3f(0.001, 0.001, 0.001)) # Add Hydrant add_reference_to_stage(usd_path=self.HYDRANT_PATH, prim_path="/World/Hydrant") hydrant_mesh = UsdGeom.Mesh.Get(omni.usd.get_context().get_stage(), "/World/Hydrant") physicsUtils.set_or_add_translate_op(hydrant_mesh, translate=Gf.Vec3f(0.8, -0.2, 0.04)) physicsUtils.set_or_add_orient_op(hydrant_mesh, orient=Gf.Quatf( 0.5, 0.5, -0.5, -0.5 )) physicsUtils.set_or_add_scale_op(hydrant_mesh, scale=Gf.Vec3f(0.001, 0.001, 0.001)) # Add Park Bench add_reference_to_stage(usd_path=self.PARK_BENCH_PATH, prim_path="/World/ParkBench") bench_mesh = UsdGeom.Mesh.Get(omni.usd.get_context().get_stage(), "/World/ParkBench") physicsUtils.set_or_add_translate_op(bench_mesh, translate=Gf.Vec3f(0.8, -0.4, 0.0)) physicsUtils.set_or_add_orient_op(bench_mesh, orient=Gf.Quatf( 0.5, 0.5, 0.5, 
            0.5
        ))
        physicsUtils.set_or_add_scale_op(bench_mesh, scale=Gf.Vec3f(0.001, 0.001, 0.001))

    def setup_scene(self):
        self._world = self.get_world()
        self._stage = omni.usd.get_context().get_stage()

        self.add_background()
        self.add_light()
        self.add_robot()
        self.add_objects()
        self.og_setup()

        self._save_count = 0
        return

    async def setup_post_load(self):
        self._world = self.get_world()
        # self._world.add_physics_callback("sending_actions", callback_fn=self.send_robot_actions)
        return

    async def setup_pre_reset(self):
        if self._world.physics_callback_exists("sim_step"):
            self._world.remove_physics_callback("sim_step")
        self._world.pause()
        return

    async def setup_post_reset(self):
        await self._world.play_async()
        self._world.pause()
        return

    def world_cleanup(self):
        self._world.pause()
        return
14,810
Python
55.315589
128
0.586496
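The /ROS2Camera graph above publishes the Limo's RGB stream on /limo/rgb. A viewer sketch on the ROS 2 side; it assumes cv_bridge and OpenCV are available, and it must be run with ROS_DOMAIN_ID=30 to match self._domain_id set in the scene:

import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2

class LimoRGBViewer(Node):
    def __init__(self):
        super().__init__("limo_rgb_viewer")
        self._bridge = CvBridge()
        self.create_subscription(Image, "/limo/rgb", self._on_image, 10)

    def _on_image(self, msg):
        frame = self._bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
        cv2.imshow("limo_rgb", frame)
        cv2.waitKey(1)

def main():
    rclpy.init()
    rclpy.spin(LimoRGBViewer())
    rclpy.shutdown()

if __name__ == "__main__":
    main()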
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ETRIcable/cable_demo.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.franka.controllers import PickPlaceController from omni.isaac.examples.base_sample import BaseSample from omni.isaac.franka import Franka from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.isaac.core.prims.geometry_prim import GeometryPrim from omni.isaac.franka import Franka from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.utils.rotations import euler_angles_to_quat from pxr import UsdGeom, Gf, UsdPhysics, Sdf, Gf, Tf, UsdLux from omni.isaac.core import SimulationContext import omni.physx.bindings._physx as physx_bindings from omni.physx.scripts import utils, physicsUtils import omni.physxdemos as demo from pxr import UsdLux, UsdGeom, Sdf, Gf, UsdPhysics, UsdShade, PhysxSchema import numpy as np import carb import omni def createSdfResolution(stage, primPath, kinematic=False): bodyPrim = stage.GetPrimAtPath(primPath) meshCollision = PhysxSchema.PhysxSDFMeshCollisionAPI.Apply(bodyPrim) meshCollision.CreateSdfResolutionAttr().Set(350) def createRigidBody(stage, primPath, kinematic=False): bodyPrim = stage.GetPrimAtPath(primPath) rigid_api = UsdPhysics.RigidBodyAPI.Apply(bodyPrim) rigid_api.CreateRigidBodyEnabledAttr(True) def addObjectsGeom(scene, name, scale, ini_pos, collision=None, mass=None, orientation=None): scene.add(GeometryPrim(prim_path=f"/World/{name}", name=f"{name}_ref_geom", collision=True)) geom = scene.get_object(f"{name}_ref_geom") if orientation is None: # Usually - (x, y, z, w) # But in Isaac Sim - (w, x, y, z) orientation = np.array([1.0, 0.0, 0.0, 0.0]) geom.set_local_scale(scale) geom.set_world_pose(position=ini_pos) geom.set_default_state(position=ini_pos, orientation=orientation) geom.set_collision_enabled(False) if collision is not None: geom.set_collision_enabled(True) geom.set_collision_approximation(collision) if mass is not None: massAPI = UsdPhysics.MassAPI.Apply(geom.prim.GetPrim()) massAPI.CreateMassAttr().Set(mass) return geom class CableDemo(BaseSample): def __init__(self) -> None: super().__init__() # Nucleus Path Configuration carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) # self.USB_MALE_PATH = self._server_root + "/Projects/ETRI/USB_A/USB_A_2_Male_modi1_3.usd" self.USB_MALE_PATH = self._server_root + "/Projects/ETRI/USB_A/USB_A_2_Male_modi1_3_pure.usd" self.USB_FEMALE_PATH = self._server_root + "/Projects/ETRI/USB_A/USB_A_2_Female_modi1_7.usd" # configure ropes: rope_length = 300 num_ropes = 3 self._defaultPrimPath = Sdf.Path("/World") self._linkHalfLength = 3 self._linkRadius = 0.5 * self._linkHalfLength self._ropeLength = rope_length self._numRopes = num_ropes self._ropeSpacing = 15.0 self._ropeColor = demo.get_primary_color() self._coneAngleLimit = 110 self._rope_damping = 10.0 self._rope_stiffness = 1.0 # configure collider capsule: self._capsuleZ = 50.0 self._capsuleHeight = 400.0 
self._capsuleRadius = 20.0 self._capsuleRestOffset = -2.0 self._capsuleColor = demo.get_static_color() return def _createCapsule(self, path: Sdf.Path): capsuleGeom = UsdGeom.Capsule.Define(self._stage, path) capsuleGeom.CreateHeightAttr(self._linkHalfLength) capsuleGeom.CreateRadiusAttr(self._linkRadius) capsuleGeom.CreateAxisAttr("X") capsuleGeom.CreateDisplayColorAttr().Set([self._ropeColor]) UsdPhysics.CollisionAPI.Apply(capsuleGeom.GetPrim()) UsdPhysics.RigidBodyAPI.Apply(capsuleGeom.GetPrim()) massAPI = UsdPhysics.MassAPI.Apply(capsuleGeom.GetPrim()) massAPI.CreateDensityAttr().Set(0.00005) physxCollisionAPI = PhysxSchema.PhysxCollisionAPI.Apply(capsuleGeom.GetPrim()) physxCollisionAPI.CreateRestOffsetAttr().Set(0.0) physxCollisionAPI.CreateContactOffsetAttr().Set(self._contactOffset) physicsUtils.add_physics_material_to_prim(self._stage, capsuleGeom.GetPrim(), self._physicsMaterialPath) def _createJoint(self, jointPath): joint = UsdPhysics.Joint.Define(self._stage, jointPath) # locked DOF (lock - low is greater than high) d6Prim = joint.GetPrim() limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transX") limitAPI.CreateLowAttr(1.0) limitAPI.CreateHighAttr(-1.0) limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transY") limitAPI.CreateLowAttr(1.0) limitAPI.CreateHighAttr(-1.0) limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transZ") limitAPI.CreateLowAttr(1.0) limitAPI.CreateHighAttr(-1.0) limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "rotX") limitAPI.CreateLowAttr(1.0) limitAPI.CreateHighAttr(-1.0) # Moving DOF: dofs = ["rotY", "rotZ"] for d in dofs: limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, d) limitAPI.CreateLowAttr(-self._coneAngleLimit) limitAPI.CreateHighAttr(self._coneAngleLimit) # joint drives for rope dynamics: driveAPI = UsdPhysics.DriveAPI.Apply(d6Prim, d) driveAPI.CreateTypeAttr("force") driveAPI.CreateDampingAttr(self._rope_damping) driveAPI.CreateStiffnessAttr(self._rope_stiffness) def _createRopes(self): linkLength = 2.0 * self._linkHalfLength - self._linkRadius numLinks = int(self._ropeLength / linkLength) xStart = -numLinks * linkLength * 0.5 yStart = -(self._numRopes // 2) * self._ropeSpacing for ropeInd in range(self._numRopes): scopePath = self._defaultPrimPath.AppendChild(f"Rope{ropeInd}") UsdGeom.Scope.Define(self._stage, scopePath) # capsule instancer instancerPath = scopePath.AppendChild("rigidBodyInstancer") rboInstancer = UsdGeom.PointInstancer.Define(self._stage, instancerPath) capsulePath = instancerPath.AppendChild("capsule") self._createCapsule(capsulePath) meshIndices = [] positions = [] orientations = [] y = yStart + ropeInd * self._ropeSpacing z = self._capsuleZ + self._capsuleRadius + self._linkRadius * 1.4 for linkInd in range(numLinks): meshIndices.append(0) x = xStart + linkInd * linkLength positions.append(Gf.Vec3f(x, y, z)) orientations.append(Gf.Quath(1.0)) meshList = rboInstancer.GetPrototypesRel() # add mesh reference to point instancer meshList.AddTarget(capsulePath) rboInstancer.GetProtoIndicesAttr().Set(meshIndices) rboInstancer.GetPositionsAttr().Set(positions) rboInstancer.GetOrientationsAttr().Set(orientations) # joint instancer jointInstancerPath = scopePath.AppendChild("jointInstancer") jointInstancer = PhysxSchema.PhysxPhysicsJointInstancer.Define(self._stage, jointInstancerPath) jointPath = jointInstancerPath.AppendChild("joint") self._createJoint(jointPath) meshIndices = [] body0s = [] body0indices = [] localPos0 = [] localRot0 = [] body1s = [] body1indices = [] localPos1 = [] localRot1 = [] body0s.append(instancerPath) 
body1s.append(instancerPath) jointX = self._linkHalfLength - 0.5 * self._linkRadius for linkInd in range(numLinks - 1): meshIndices.append(0) body0indices.append(linkInd) body1indices.append(linkInd + 1) localPos0.append(Gf.Vec3f(jointX, 0, 0)) localPos1.append(Gf.Vec3f(-jointX, 0, 0)) localRot0.append(Gf.Quath(1.0)) localRot1.append(Gf.Quath(1.0)) meshList = jointInstancer.GetPhysicsPrototypesRel() meshList.AddTarget(jointPath) jointInstancer.GetPhysicsProtoIndicesAttr().Set(meshIndices) jointInstancer.GetPhysicsBody0sRel().SetTargets(body0s) jointInstancer.GetPhysicsBody0IndicesAttr().Set(body0indices) jointInstancer.GetPhysicsLocalPos0sAttr().Set(localPos0) jointInstancer.GetPhysicsLocalRot0sAttr().Set(localRot0) jointInstancer.GetPhysicsBody1sRel().SetTargets(body1s) jointInstancer.GetPhysicsBody1IndicesAttr().Set(body1indices) jointInstancer.GetPhysicsLocalPos1sAttr().Set(localPos1) jointInstancer.GetPhysicsLocalRot1sAttr().Set(localRot1) def setup_scene(self): self._world = self.get_world() self._stage = omni.usd.get_context().get_stage() # self._world.scene.add_default_ground_plane() # physics options: self._contactOffset = 2.0 self._physicsMaterialPath = self._defaultPrimPath.AppendChild("PhysicsMaterial") UsdShade.Material.Define(self._stage, self._physicsMaterialPath) material = UsdPhysics.MaterialAPI.Apply(self._stage.GetPrimAtPath(self._physicsMaterialPath)) material.CreateStaticFrictionAttr().Set(0.5) material.CreateDynamicFrictionAttr().Set(0.5) material.CreateRestitutionAttr().Set(0) self._world.scene.add_ground_plane() # self.setup_simulation() self.add_light(self._world.scene.stage) # self._createRopes() # # USB Male # add_reference_to_stage(usd_path=self.USB_MALE_PATH, prim_path=f"/World/usb_male") # createSdfResolution(self._world.scene.stage, "/World/usb_male") # createRigidBody(self._world.scene.stage, "/World/usb_male") # self._usb_male_geom = addObjectsGeom( # self._world.scene, "usb_male", # scale=np.array([0.02, 0.02, 0.02]), # ini_pos=np.array([0.5, 0.2, -0.01]), # # ini_pos=np.array([0.50037, -0.2, 0.06578]), # collision="sdf", # mass=None, # orientation=None # ) self.simulation_context = SimulationContext() return def add_light(self, stage): sphereLight = UsdLux.SphereLight.Define(stage, Sdf.Path("/World/SphereLight")) sphereLight.CreateRadiusAttr(0.2) sphereLight.CreateIntensityAttr(30000) sphereLight.AddTranslateOp().Set(Gf.Vec3f(0.0, 0.0, 2.0)) def setup_simulation(self): self._scene = PhysicsContext() # self._scene.set_solver_type("TGS") self._scene.set_broadphase_type("GPU") self._scene.enable_gpu_dynamics(flag=True) # self._scene.set_friction_offset_threshold(0.01) # self._scene.set_friction_correlation_distance(0.0005) # self._scene.set_gpu_total_aggregate_pairs_capacity(10 * 1024) # self._scene.set_gpu_found_lost_pairs_capacity(10 * 1024) # self._scene.set_gpu_heap_capacity(64 * 1024 * 1024) # self._scene.set_gpu_found_lost_aggregate_pairs_capacity(10 * 1024) # # added because of new errors regarding collisionstacksize # physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(get_prim_at_path("/physicsScene")) # physxSceneAPI.CreateGpuCollisionStackSizeAttr().Set(76000000) # or whatever min is needed async def setup_post_load(self): self._world.add_physics_callback("sim_step", callback_fn=self.physics_step) return def physics_step(self, step_size): return async def setup_pre_reset(self): return async def setup_post_reset(self): await self._world.play_async() return def world_cleanup(self): return
12,777
Python
40.487013
112
0.648666
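Editor's note: the rope in cable_demo.py is assembled from overlapping capsule links driven by D6 joints, and the number of links and the joint anchor offset follow directly from the link half-length and radius. A minimal standalone sketch of that spacing math (not part of the original file, values taken from the defaults above):

import numpy as np

def rope_link_layout(rope_length=300.0, link_half_length=3.0):
    link_radius = 0.5 * link_half_length
    # consecutive capsules overlap by one radius so the joint frames sit inside the geometry
    link_length = 2.0 * link_half_length - link_radius
    num_links = int(rope_length / link_length)
    # local x-offset of the joint anchor on each capsule body
    joint_x = link_half_length - 0.5 * link_radius
    return num_links, link_length, joint_x

print(rope_link_layout())  # (66, 4.5, 2.25) for the defaults used in CableDemo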
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/FrankaFollowTarget/franka_follow_target.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample import numpy as np from omni.isaac.core.prims.geometry_prim import GeometryPrim # Note: checkout the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html from pxr import UsdGeom, Gf, UsdPhysics, Sdf, Gf, Tf, UsdLux from omni.isaac.franka.controllers.rmpflow_controller import RMPFlowController from omni.isaac.franka import Franka from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root import carb import omni import rclpy import asyncio from .ros2_twist_sub import QuestROS2Sub def addObjectsGeom(scene, name, scale, ini_pos, collision=None, mass=None, orientation=None): scene.add(GeometryPrim(prim_path=f"/World/{name}", name=f"{name}_ref_geom", collision=True)) geom = scene.get_object(f"{name}_ref_geom") if orientation is None: # Usually - (x, y, z, w) # But in Isaac Sim - (w, x, y, z) orientation = np.array([1.0, 0.0, 0.0, 0.0]) geom.set_local_scale(scale) geom.set_world_pose(position=ini_pos) geom.set_default_state(position=ini_pos, orientation=orientation) geom.set_collision_enabled(False) if collision is not None: geom.set_collision_enabled(True) geom.set_collision_approximation(collision) if mass is not None: massAPI = UsdPhysics.MassAPI.Apply(geom.prim.GetPrim()) massAPI.CreateMassAttr().Set(mass) return geom class FrankaFollowTarget(BaseSample): def __init__(self) -> None: super().__init__() self._isaac_assets_path = get_assets_root_path() self.CUBE_URL = self._isaac_assets_path + "/Isaac/Props/Blocks/nvidia_cube.usd" self._quest_ros2_sub_node = None self._controller = None self._articulation_controller = None return def __del__(self): if self._quest_ros2_sub_node is not None: self._quest_ros2_sub_node.destroy_node() rclpy.shutdown() return async def ros_loop(self): while rclpy.ok(): rclpy.spin_once(self._quest_ros2_sub_node, timeout_sec=0) await asyncio.sleep(1e-4) def add_light(self): sphereLight1 = UsdLux.SphereLight.Define(self._stage, Sdf.Path("/World/SphereLight")) sphereLight1.CreateIntensityAttr(10000) sphereLight1.CreateRadiusAttr(0.1) sphereLight1.AddTranslateOp().Set(Gf.Vec3f(1.0, 0.0, 1.0)) def add_cube(self): add_reference_to_stage(usd_path=self.CUBE_URL, prim_path=f"/World/NVIDIA_Cube") self._cube_geom = addObjectsGeom( self._world.scene, "NVIDIA_Cube", scale=np.array([1.0, 1.0, 1.0]), ini_pos=np.array([0.5, 0.3, 0.1]), collision="sdf", mass=0.1, orientation=None ) def add_franka(self): self._franka = self._world.scene.add( Franka( prim_path="/World/Fancy_Franka", name="fancy_franka" ) ) def setup_scene(self): self._world = self.get_world() self._stage = omni.usd.get_context().get_stage() self._world.scene.add_default_ground_plane() self.add_franka() self.add_light() self.add_cube() return async def setup_post_load(self): self._world = self.get_world() self._my_franka = self._world.scene.get_object("fancy_franka") self._my_gripper = self._my_franka.gripper # RMPFlow controller self._controller = RMPFlowController( name="target_follower_controller", 
robot_articulation=self._my_franka ) # ROS 2 init rclpy.init(args=None) self._quest_ros2_sub_node = QuestROS2Sub() self._articulation_controller = self._my_franka.get_articulation_controller() self._world.add_physics_callback("sim_step", callback_fn=self.physics_callback) await self.ros_loop() return def physics_callback(self, step_size): ee_position, ee_orientation = self._quest_ros2_sub_node.get_pose( z_offset=0.30, # z -180 > 0, 0, -1, 0 q_offset=np.array([0.0, 0.0, -1.0, 0.0]) ) gripper_command = self._quest_ros2_sub_node.get_right_btn() if np.array_equal( ee_position, np.array([0.0, 0.0, 0.0]) ): ee_position = np.array([0.4, 0, 0.5]) if gripper_command: self._my_gripper.close() else: self._my_gripper.open() # RMPFlow controller actions = self._controller.forward( target_end_effector_position=ee_position, target_end_effector_orientation=ee_orientation, # w x y z => x y z w # 0 0 1 0 => 0 1 0 0 # 0 0 1 0 => 0 1 0 0 # target_end_effector_orientation=np.array([0, 0, 1, 0]), ) self._articulation_controller.apply_action(actions) return async def setup_pre_reset(self): world = self.get_world() if world.physics_callback_exists("sim_step"): world.remove_physics_callback("sim_step") self._controller.reset() return async def setup_post_reset(self): self._controller.reset() await self._world.play_async() return def world_cleanup(self): self._world.pause() self._controller = None return
5,982
Python
32.424581
116
0.622033
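Editor's note: franka_follow_target.py imports QuestROS2Sub from ros2_twist_sub.py, which is not included in this dump. The sketch below is purely hypothetical and only illustrates the kind of rclpy subscriber the physics_callback expects; the topic names, message types, and the get_pose/get_right_btn signatures are assumptions (the real node also takes a q_offset argument, omitted here):

import numpy as np
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Bool


class QuestPoseSubSketch(Node):
    def __init__(self):
        super().__init__("quest_pose_sub_sketch")
        self._pose = np.zeros(3)
        self._quat = np.array([1.0, 0.0, 0.0, 0.0])  # (w, x, y, z), Isaac Sim convention
        self._btn = False
        self.create_subscription(PoseStamped, "quest/pose", self._pose_cb, 10)
        self.create_subscription(Bool, "quest/right_btn", self._btn_cb, 10)

    def _pose_cb(self, msg):
        p, q = msg.pose.position, msg.pose.orientation
        self._pose = np.array([p.x, p.y, p.z])
        self._quat = np.array([q.w, q.x, q.y, q.z])

    def _btn_cb(self, msg):
        self._btn = msg.data

    def get_pose(self, z_offset=0.0):
        return self._pose + np.array([0.0, 0.0, z_offset]), self._quat

    def get_right_btn(self):
        return self._btn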
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/HelloMultiRobot/hello_multi_robot.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from omni.isaac.franka.tasks import PickPlace from omni.isaac.wheeled_robots.robots import WheeledRobot from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.wheeled_robots.controllers import WheelBasePoseController from omni.isaac.franka.controllers import PickPlaceController from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController from omni.isaac.core.tasks import BaseTask from omni.isaac.core.utils.types import ArticulationAction import numpy as np class RobotsPlaying(BaseTask): def __init__( self, name ): super().__init__(name=name, offset=None) self._jetbot_goal_position = np.array([1.3, 0.3, 0]) self._task_event = 0 self._pick_place_task = PickPlace(cube_initial_position=np.array([0.1, 0.3, 0.05]), target_position=np.array([0.7, -0.3, 0.0515 / 2.0])) return def set_up_scene(self, scene): super().set_up_scene(scene) self._pick_place_task.set_up_scene(scene) assets_root_path = get_assets_root_path() jetbot_asset_path = assets_root_path + "/Isaac/Robots/Jetbot/jetbot.usd" self._jetbot = scene.add( WheeledRobot( prim_path="/World/Fancy_Jetbot", name="fancy_jetbot", wheel_dof_names=["left_wheel_joint", "right_wheel_joint"], create_robot=True, usd_path=jetbot_asset_path, position=np.array([0, 0.3, 0]), ) ) pick_place_params = self._pick_place_task.get_params() self._franka = scene.get_object(pick_place_params["robot_name"]["value"]) self._franka.set_world_pose(position=np.array([1.0, 0, 0])) self._franka.set_default_state(position=np.array([1.0, 0, 0])) return def get_observations(self): current_jetbot_position, current_jetbot_orientation = self._jetbot.get_world_pose() observations= { "task_event": self._task_event, self._jetbot.name: { "position": current_jetbot_position, "orientation": current_jetbot_orientation, "goal_position": self._jetbot_goal_position } } # add the subtask's observations as well observations.update(self._pick_place_task.get_observations()) return observations def get_params(self): pick_place_params = self._pick_place_task.get_params() params_representation = pick_place_params params_representation["jetbot_name"] = {"value": self._jetbot.name, "modifiable": False} params_representation["franka_name"] = pick_place_params["robot_name"] return params_representation def pre_step(self, control_index, simulation_time): if self._task_event == 0: current_jetbot_position, _ = self._jetbot.get_world_pose() if np.mean(np.abs(current_jetbot_position[:2] - self._jetbot_goal_position[:2])) < 0.04: self._task_event += 1 self._cube_arrive_step_index = control_index elif self._task_event == 1: if control_index - self._cube_arrive_step_index == 200: self._task_event += 1 return def post_reset(self): self._franka.gripper.set_joint_positions(self._franka.gripper.joint_opened_positions) self._task_event = 0 return class HelloMultiRobot(BaseSample): def __init__(self) -> None: super().__init__() return def setup_scene(self): world = self.get_world() world.add_task(RobotsPlaying(name="awesome_task")) return async def setup_post_load(self): 
self._world = self.get_world() task_params = self._world.get_task("awesome_task").get_params() # We need franka later to apply to it actions self._franka = self._world.scene.get_object(task_params["franka_name"]["value"]) self._jetbot = self._world.scene.get_object(task_params["jetbot_name"]["value"]) # We need the cube later on for the pick place controller self._cube_name = task_params["cube_name"]["value"] # Add Franka Controller self._franka_controller = PickPlaceController(name="pick_place_controller", gripper=self._franka.gripper, robot_articulation=self._franka) self._jetbot_controller = WheelBasePoseController(name="cool_controller", open_loop_wheel_controller= DifferentialController(name="simple_control", wheel_radius=0.03, wheel_base=0.1125)) self._world.add_physics_callback("sim_step", callback_fn=self.physics_step) await self._world.play_async() return async def setup_post_reset(self): self._franka_controller.reset() self._jetbot_controller.reset() await self._world.play_async() return def physics_step(self, step_size): current_observations = self._world.get_observations() if current_observations["task_event"] == 0: self._jetbot.apply_wheel_actions( self._jetbot_controller.forward( start_position=current_observations[self._jetbot.name]["position"], start_orientation=current_observations[self._jetbot.name]["orientation"], goal_position=current_observations[self._jetbot.name]["goal_position"])) elif current_observations["task_event"] == 1: self._jetbot.apply_wheel_actions(ArticulationAction(joint_velocities=[-8, -8])) elif current_observations["task_event"] == 2: self._jetbot.apply_wheel_actions(ArticulationAction(joint_velocities=[0.0, 0.0])) # Pick up the block actions = self._franka_controller.forward( picking_position=current_observations[self._cube_name]["position"], placing_position=current_observations[self._cube_name]["target_position"], current_joint_positions=current_observations[self._franka.name]["joint_positions"]) self._franka.apply_action(actions) # Pause once the controller is done if self._franka_controller.is_done(): self._world.pause() return
7,063
Python
46.093333
118
0.612771
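Editor's note: RobotsPlaying.pre_step declares the Jetbot "arrived" when the mean absolute XY error drops below 0.04, which is not the same as a Euclidean distance check. A small illustration (not from the original file):

import numpy as np

goal = np.array([1.3, 0.3])
pos = np.array([1.26, 0.33])
mean_abs_err = np.mean(np.abs(pos - goal))   # 0.035 -> passes the 0.04 threshold used above
euclidean_err = np.linalg.norm(pos - goal)   # 0.05  -> would fail a 0.04 distance check
print(mean_abs_err, euclidean_err)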
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ETRIUR10/ur10_basic.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

from omni.isaac.franka.controllers import PickPlaceController
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.core.tasks import BaseTask
from omni.isaac.franka import Franka
from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units
from omni.isaac.core.physics_context.physics_context import PhysicsContext
from omni.isaac.core.prims.geometry_prim import GeometryPrim
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from pxr import UsdGeom, Gf, UsdPhysics, Sdf, Tf, UsdLux
from omni.isaac.core import SimulationContext
from omni.physx.scripts import utils
from pxr import PhysxSchema

import numpy as np
import carb

from omni.isaac.universal_robots import UR10
from omni.isaac.universal_robots.controllers.rmpflow_controller import RMPFlowController


class UR10Basic(BaseSample):
    def __init__(self) -> None:
        super().__init__()

        # Nucleus Path Configuration
        carb.log_info("Check /persistent/isaac/asset_root/default setting")
        default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default")
        self._server_root = get_url_root(default_asset_root)
        return

    def setup_scene(self):
        self._world = self.get_world()
        self._world.scene.add_default_ground_plane()

        # Add UR10 robot
        self._ur10 = self._world.scene.add(
            UR10(
                prim_path="/World/UR10",
                name="UR10",
                attach_gripper=False,
            )
        )

        self.simulation_context = SimulationContext()
        return

    async def setup_post_load(self):
        self._my_ur10 = self._world.scene.get_object("UR10")

        # RMPFlow controller
        self._controller = RMPFlowController(
            name="target_follower_controller",
            robot_articulation=self._my_ur10
        )
        self._articulation_controller = self._my_ur10.get_articulation_controller()
        self._world.add_physics_callback("sim_step", callback_fn=self.physics_step)
        return

    def physics_step(self, step_size):
        # RMPFlow controller
        actions = self._controller.forward(
            target_end_effector_position=np.array([0.4, 0, 0.5]),
            # target_end_effector_orientation=ee_orientation,
            # w x y z => x y z w
            # 0 0 1 0 => 0 1 0 0
            # 0 0 1 0 => 0 1 0 0
            target_end_effector_orientation=np.array([1, 0, 0, 0]),
        )
        self._articulation_controller.apply_action(actions)
        return

    async def setup_pre_reset(self):
        self._save_count = 0
        self._event = 0
        return

    async def setup_post_reset(self):
        await self._world.play_async()
        return

    def world_cleanup(self):
        return
3,429
Python
31.666666
101
0.673666
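Editor's note: the "w x y z => x y z w" comments in physics_step refer to the two common quaternion orderings: Isaac Sim core APIs use (w, x, y, z) while many ROS/SciPy utilities use (x, y, z, w). Small conversion helpers for illustration (not part of ur10_basic.py):

import numpy as np

def wxyz_to_xyzw(q):
    w, x, y, z = q
    return np.array([x, y, z, w])

def xyzw_to_wxyz(q):
    x, y, z, w = q
    return np.array([w, x, y, z])

print(wxyz_to_xyzw(np.array([1.0, 0.0, 0.0, 0.0])))  # identity rotation -> [0. 0. 0. 1.]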
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/HelloCamera/hello_camera.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample from datetime import datetime import numpy as np # Note: checkout the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html from omni.isaac.core.objects import DynamicCuboid from omni.isaac.sensor import Camera from omni.isaac.core import World import omni.isaac.core.utils.numpy.rotations as rot_utils from omni.isaac.core import SimulationContext from PIL import Image import carb import h5py import omni import cv2 class HelloCamera(BaseSample): def __init__(self) -> None: super().__init__() self._save_count = 0 self._f = None self._time_list = [] self._img_list = [] return def setup_camera(self): self._camera = Camera( prim_path="/World/camera", position=np.array([0.0, 0.0, 25.0]), frequency=30, resolution=(640, 480), orientation=rot_utils.euler_angles_to_quats(np.array([0, 90, 0]), degrees=True), ) self._camera.initialize() self._camera.add_motion_vectors_to_frame() def setup_cube(self, prim_path, name, position, scale, color): self._fancy_cube = self._world.scene.add( DynamicCuboid( prim_path=prim_path, name=name, position=position, scale=scale, color=color, )) def setup_dataset(self): now = datetime.now() # current date and time date_time_str = now.strftime("%m_%d_%Y_%H_%M_%S") file_name = f'hello_cam_{date_time_str}.hdf5' print(file_name) self._f = h5py.File(file_name,'w') self._group = self._f.create_group("isaac_save_data") def setup_scene(self): print("setup_scene") world = self.get_world() world.scene.add_default_ground_plane() self.setup_camera() self.setup_cube( prim_path="/World/random_cube", name="fancy_cube", position=np.array([0, 0, 1.0]), scale=np.array([0.5015, 0.5015, 0.5015]), color=np.array([0, 0, 1.0]), ) self.setup_dataset() self.simulation_context = SimulationContext() async def setup_post_load(self): print("setup_post_load") world = self.get_world() self._camera.add_motion_vectors_to_frame() self._world.add_physics_callback("sim_step", callback_fn=self.physics_callback) #callback names have to be unique return def physics_callback(self, step_size): self._camera.get_current_frame() if self._save_count % 100 == 0: current_time = self.simulation_context.current_time self._time_list.append(current_time) self._img_list.append(self._camera.get_rgba()[:, :, :3]) print("Data Collected") self._save_count += 1 if self._save_count == 500: self.world_cleanup() async def setup_pre_reset(self): if self._f is not None: self._f.close() self._f = None elif self._f is None: print("Create new file for new data collection...") self.setup_dataset() self._save_count = 0 return # async def setup_post_reset(self): # return def world_cleanup(self): if self._f is not None: self._group.create_dataset(f"sim_time", data=self._time_list, compression='gzip', compression_opts=9) self._group.create_dataset(f"image", data=self._img_list, compression='gzip', compression_opts=9) self._f.close() print("Data saved") elif self._f is None: print("Invalid Operation Data not saved") self._f = None self._save_count = 0 world = self.get_world() world.pause() return
4,415
Python
27.490322
121
0.593431
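Editor's note: hello_camera.py writes its samples into an HDF5 file whose layout is defined by setup_dataset and world_cleanup. A reader sketch under that assumption (the timestamped filename below is an illustrative placeholder):

import h5py

with h5py.File("hello_cam_<timestamp>.hdf5", "r") as f:
    grp = f["isaac_save_data"]
    sim_time = grp["sim_time"][:]   # simulation timestamps, one per saved frame
    images = grp["image"][:]        # stacked RGB frames from the 640x480 camera
    print(len(sim_time), images.shape)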
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/MirobotGripperControl/gripper_control.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.manipulators.grippers.surface_gripper import SurfaceGripper
from omni.isaac.manipulators import SingleManipulator

import numpy as np
import carb


class GripperControl(BaseSample):
    def __init__(self) -> None:
        super().__init__()

        carb.log_info("Check /persistent/isaac/asset_root/default setting")
        default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default")
        self._server_root = get_url_root(default_asset_root)
        self._robot_path = self._server_root + "/Projects/RBROS2/mirobot_ros2/mirobot_description/urdf/mirobot_urdf_2/mirobot_urdf_2.usd"

        self._joints_default_positions = np.zeros(6)

        # simulation step counter
        self._sim_step = 0
        return

    def setup_scene(self):
        world = self.get_world()
        world.scene.add_default_ground_plane()

        # add robot to the scene
        add_reference_to_stage(usd_path=self._robot_path, prim_path="/World/mirobot")

        self._gripper = SurfaceGripper(
            end_effector_prim_path="/World/mirobot/Link6",
            translate=0.1611,
            direction="x"
        )

        # define the manipulator
        self._my_mirobot = self._world.scene.add(
            SingleManipulator(
                prim_path="/World/mirobot",
                name="mirobot",
                end_effector_prim_name="Link6",
                gripper=self._gripper,
            ))

        # default joint states
        self._my_mirobot.set_joints_default_state(
            positions=self._joints_default_positions
        )
        return

    async def setup_post_load(self):
        self._world = self.get_world()
        self._world.add_physics_callback("sending_actions", callback_fn=self.send_robot_actions)
        return

    def send_robot_actions(self, step_size):
        self._sim_step += 1
        return
2,611
Python
33.826666
137
0.661815
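Editor's note: as written, send_robot_actions only counts physics steps. A sketch of how the surface gripper could be exercised from that callback; this assumes SurfaceGripper exposes close()/open() methods (verify against the installed omni.isaac.manipulators API), and the toggle period is arbitrary:

def toggle_gripper(gripper, sim_step, period=400):
    # close for the first half of each period, open for the second half
    if sim_step % period < period // 2:
        gripper.close()
    else:
        gripper.open()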
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/FootEnv/foot_env.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample import numpy as np # Note: checkout the required tutorials at https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.objects import DynamicCuboid from omni.isaac.sensor import Camera from omni.isaac.core import World import omni.graph.core as og import omni.isaac.core.utils.numpy.rotations as rot_utils from omni.isaac.core import SimulationContext from omni.physx.scripts import deformableUtils, physicsUtils from pxr import UsdGeom, Gf, UsdPhysics, Sdf, Gf, Tf, UsdLux from PIL import Image import carb import h5py import omni import cv2 class FootEnv(BaseSample): def __init__(self) -> None: super().__init__() self._save_count = 0 self._rotate_count = 0 carb.log_info("Check /persistent/isaac/asset_root/default setting") default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default") self._server_root = get_url_root(default_asset_root) self._time_list = [] self._img_list = [] return def og_setup(self): camprim1 = "/World/Orbbec_Gemini2/Orbbec_Gemini2/camera_ir_left/camera_left/Stream_depth" camprim2 = "/World/Orbbec_Gemini2/Orbbec_Gemini2/camera_rgb/camera_rgb/Stream_rgb" try: og.Controller.edit( {"graph_path": "/ActionGraph", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"), ("RenderProduct1", "omni.isaac.core_nodes.IsaacCreateRenderProduct"), ("RenderProduct2", "omni.isaac.core_nodes.IsaacCreateRenderProduct"), ("RGBPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ("DepthPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ("CameraInfoPublish", "omni.isaac.ros2_bridge.ROS2CameraHelper"), ], og.Controller.Keys.SET_VALUES: [ ("RenderProduct1.inputs:cameraPrim", camprim1), ("RenderProduct2.inputs:cameraPrim", camprim2), ("RGBPublish.inputs:topicName", "rgb"), ("RGBPublish.inputs:type", "rgb"), ("RGBPublish.inputs:resetSimulationTimeOnStop", True), ("DepthPublish.inputs:topicName", "depth"), ("DepthPublish.inputs:type", "depth"), ("DepthPublish.inputs:resetSimulationTimeOnStop", True), ("CameraInfoPublish.inputs:topicName", "depth_camera_info"), ("CameraInfoPublish.inputs:type", "camera_info"), ("CameraInfoPublish.inputs:resetSimulationTimeOnStop", True), ], og.Controller.Keys.CONNECT: [ ("OnPlaybackTick.outputs:tick", "RenderProduct1.inputs:execIn"), ("OnPlaybackTick.outputs:tick", "RenderProduct2.inputs:execIn"), ("RenderProduct1.outputs:execOut", "DepthPublish.inputs:execIn"), ("RenderProduct1.outputs:execOut", "CameraInfoPublish.inputs:execIn"), ("RenderProduct2.outputs:execOut", "RGBPublish.inputs:execIn"), ("RenderProduct1.outputs:renderProductPath", "DepthPublish.inputs:renderProductPath"), ("RenderProduct1.outputs:renderProductPath", "CameraInfoPublish.inputs:renderProductPath"), ("RenderProduct2.outputs:renderProductPath", "RGBPublish.inputs:renderProductPath"), ], }, ) except Exception as e: print(e) def 
camera_setup(self): gemini_usd_path = self._server_root + "/NVIDIA/Assets/Isaac/2023.1.1/Isaac/Sensors/Orbbec/Gemini 2/orbbec_gemini2_V1.0.usd" # TODO : check foot availability add_reference_to_stage( usd_path=gemini_usd_path, prim_path=f"/World/Orbbec_Gemini2", ) self._gemini_mesh = UsdGeom.Mesh.Get(self._stage, "/World/Orbbec_Gemini2") physicsUtils.set_or_add_translate_op(self._gemini_mesh, translate=Gf.Vec3f(0.05, 0.5, 0.5)) # x: -3.1415927, y: 0, z: -1.5707963 rot = rot_utils.euler_angles_to_quats(np.array([ 90, 0, 0 ]), degrees=True) physicsUtils.set_or_add_orient_op(self._gemini_mesh, orient=Gf.Quatf(*rot)) # physicsUtils.set_or_add_scale_op(gemini_mesh, scale=Gf.Vec3f(0.001, 0.001, 0.001)) ldm_light = self._stage.GetPrimAtPath("/World/Orbbec_Gemini2/Orbbec_Gemini2/camera_ldm/camera_ldm/RectLight") ldm_light_intensity = ldm_light.GetAttribute("intensity") ldm_light_intensity.Set(0) def add_female_foot(self): foot_usd_path = self._server_root + "/Projects/DInsight/Female_Foot.usd" add_reference_to_stage( usd_path=foot_usd_path, prim_path=f"/World/foot", ) foot_mesh = UsdGeom.Mesh.Get(self._stage, "/World/foot") physicsUtils.set_or_add_translate_op(foot_mesh, translate=Gf.Vec3f(0.0, 0.0, 0.6)) physicsUtils.set_or_add_orient_op(foot_mesh, orient=Gf.Quatf(-0.5, -0.5, -0.5, -0.5)) physicsUtils.set_or_add_scale_op(foot_mesh, scale=Gf.Vec3f(0.001, 0.001, 0.001)) def add_male_foot(self): foot_usd_path = self._server_root + "/Projects/DInsight/foot1.usd" add_reference_to_stage( usd_path=foot_usd_path, prim_path=f"/World/foot", ) foot_mesh = UsdGeom.Mesh.Get(self._stage, "/World/foot") physicsUtils.set_or_add_translate_op(foot_mesh, translate=Gf.Vec3f(0.0, 0.0, 0.45)) physicsUtils.set_or_add_orient_op(foot_mesh, orient=Gf.Quatf(-0.5, -0.5, -0.5, -0.5)) physicsUtils.set_or_add_scale_op(foot_mesh, scale=Gf.Vec3f(0.25, 0.25, 0.25)) def add_soft_foot(self): foot_usd_path = self._server_root + "/Projects/DInsight/foot2.usd" add_reference_to_stage( usd_path=foot_usd_path, prim_path=f"/World/foot", ) foot_mesh = UsdGeom.Mesh.Get(self._stage, "/World/foot") physicsUtils.set_or_add_translate_op(foot_mesh, translate=Gf.Vec3f(0.2, 0.08, 0.33)) physicsUtils.set_or_add_orient_op(foot_mesh, orient=Gf.Quatf(0.6918005, 0.3113917, 0, 0.6514961)) physicsUtils.set_or_add_scale_op(foot_mesh, scale=Gf.Vec3f(1.0, 1.0, 1.0)) def add_leg_foot(self): foot_usd_path = self._server_root + "/Projects/DInsight/foot3.usd" add_reference_to_stage( usd_path=foot_usd_path, prim_path=f"/World/foot", ) foot_mesh = UsdGeom.Mesh.Get(self._stage, "/World/foot") physicsUtils.set_or_add_translate_op(foot_mesh, translate=Gf.Vec3f(0.0, 0.08, 0.30)) # physicsUtils.set_or_add_orient_op(foot_mesh, orient=Gf.Quatf(-0.5207212, -0.3471475, 0, -0.7799603)) physicsUtils.set_or_add_orient_op(foot_mesh, orient=Gf.Quatf(0.6513039, 0.4342026, 0.3618355, 0.5063066 )) physicsUtils.set_or_add_scale_op(foot_mesh, scale=Gf.Vec3f(0.005, 0.005, 0.005)) def add_light(self): sphereLight = UsdLux.SphereLight.Define(self._stage, Sdf.Path("/World/bottomSphereLight")) sphereLight.CreateIntensityAttr(3000) sphereLight.CreateRadiusAttr(0.05) def setup_scene(self): self._world = self.get_world() self._stage = omni.usd.get_context().get_stage() self._world.scene.add_default_ground_plane() self.simulation_context = SimulationContext() self.add_light() self.camera_setup() # self.add_female_foot() # self.add_male_foot() # self.add_soft_foot() self.add_leg_foot() self.og_setup() # self._f = h5py.File('hello_cam.hdf5','w') # self._group = 
self._f.create_group("isaac_save_data") return async def setup_post_load(self): self._world.add_physics_callback("sim_step", callback_fn=self.physics_callback) #callback names have to be unique return def physics_callback(self, step_size): # self._camera.get_current_frame() if self._save_count % 11 == 0: current_time = self.simulation_context.current_time omega = 2 * np.pi * self._rotate_count / 100 x_offset, y_offset = 0.5 * np.cos(omega), 0.5 * np.sin(omega) physicsUtils.set_or_add_translate_op(self._gemini_mesh, translate=Gf.Vec3f( 0.05, x_offset, 0.5 + y_offset)) rot = rot_utils.euler_angles_to_quats( np.array([ 90 + 360 / 100 * self._rotate_count, 0, 0 ]), degrees=True) print(f"rot: {rot}") physicsUtils.set_or_add_orient_op(self._gemini_mesh, orient=Gf.Quatf(*rot)) self._time_list.append(current_time) # self._img_list.append(self._camera.get_rgba()[:, :, :3]) print("Data Collected") self._rotate_count += 1 self._save_count += 1 if self._save_count > 1100: self.world_cleanup() # async def setup_pre_reset(self): # return # async def setup_post_reset(self): # return def world_cleanup(self): # self._group.create_dataset(f"sim_time", data=self._time_list, compression='gzip', compression_opts=9) # self._group.create_dataset(f"image", data=self._img_list, compression='gzip', compression_opts=9) # self._f.close() # print("Data saved") self._save_count = 0 self._world.pause() return
10,651
Python
39.501901
131
0.596564
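Editor's note: FootEnv.physics_callback sweeps the Gemini camera rig on a circle around the foot, one full revolution every 100 updates, while advancing the rig's rotation in lockstep so the sensor keeps facing the target. A standalone sketch of that trajectory math (not part of the original file, constants taken from it):

import numpy as np

def scan_pose(rotate_count, radius=0.5, z_center=0.5, steps_per_rev=100):
    omega = 2.0 * np.pi * rotate_count / steps_per_rev
    y = radius * np.cos(omega)
    z = z_center + radius * np.sin(omega)
    angle_deg = 90.0 + 360.0 * rotate_count / steps_per_rev  # fed to euler_angles_to_quats above
    return (0.05, y, z), angle_deg

print(scan_pose(0))   # start of the sweep
print(scan_pose(50))  # half a revolution later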
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/ReplicatorBinwithStuffs/replicator_basic.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.core.utils.stage import add_reference_to_stage, get_stage_units from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root from omni.isaac.core.physics_context.physics_context import PhysicsContext from semantics.schema.editor import remove_prim_semantics from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.examples.base_sample import BaseSample import omni.replicator.core as rep import carb.settings import numpy as np from omni.isaac.core.prims.geometry_prim import GeometryPrim from os.path import expanduser import datetime now = datetime.datetime.now() PROPS = { 'spam' : "/Isaac/Props/YCB/Axis_Aligned/010_potted_meat_can.usd", 'jelly' : "/Isaac/Props/YCB/Axis_Aligned/009_gelatin_box.usd", 'tuna' : "/Isaac/Props/YCB/Axis_Aligned/007_tuna_fish_can.usd", 'cleanser' : "/Isaac/Props/YCB/Axis_Aligned/021_bleach_cleanser.usd", 'tomato_soup' : "/Isaac/Props/YCB/Axis_Aligned/005_tomato_soup_can.usd" } class BinwithStuffs(BaseSample): def __init__(self) -> None: super().__init__() # Nucleus Path Configuration self._isaac_assets_path = get_assets_root_path() self._nucleus_server_path = "omniverse://localhost/NVIDIA/" self.CRATE = self._nucleus_server_path + "Samples/Marbles/assets/standalone/SM_room_crate_3/SM_room_crate_3.usd" self.BIN_URL = self._isaac_assets_path + "/Isaac/Props/KLT_Bin/small_KLT_visual.usd" # Enable scripts carb.settings.get_settings().set_bool("/app/omni.graph.scriptnode/opt_in", True) # Disable capture on play and async rendering carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False) carb.settings.get_settings().set("/omni/replicator/asyncRendering", False) carb.settings.get_settings().set("/app/asyncRendering", False) # Replicator Writerdir now_str = now.strftime("%Y-%m-%d_%H:%M:%S") self._out_dir = str(expanduser("~") + "/Documents/grocery_data_" + now_str) # bin geometry property self._bin_scale = np.array([2.0, 2.0, 0.5]) self._bin_position = np.array([0.0, 0.0, 0.3]) self._plane_scale = np.array([0.24, 0.4, 1.0]) self._plane_position = np.array([0.0, 0.0, 0.4]) self._plane_rotation = np.array([0.0, 0.0, 0.0]) self._sim_step = 0 return def random_props(self, file_name, class_name, max_number=3, one_in_n_chance=4): file_name = self._isaac_assets_path + file_name instances = rep.randomizer.instantiate(file_name, size=max_number, mode='scene_instance') with instances: rep.physics.collider() rep.modify.semantics([('class', class_name)]) rep.randomizer.scatter_2d(self.plane, check_for_collisions=True) rep.modify.pose( rotation=rep.distribution.uniform((-180,-180, -180), (180, 180, 180)), ) visibility_dist = [True] + [False]*(one_in_n_chance) rep.modify.visibility(rep.distribution.choice(visibility_dist)) def random_sphere_lights(self): with self.rp_light: rep.modify.pose( position=rep.distribution.uniform((-0.5, -0.5, 1.0), (0.5, 0.5, 1.0)), ) def setup_scene(self): world = self.get_world() world.scene.add_default_ground_plane() add_reference_to_stage(usd_path=self.BIN_URL, prim_path="/World/bin") world.scene.add(GeometryPrim(prim_path="/World/bin", name=f"bin_ref_geom", 
collision=True)) self._bin_ref_geom = world.scene.get_object(f"bin_ref_geom") self._bin_ref_geom.set_local_scale(np.array([self._bin_scale])) self._bin_ref_geom.set_world_pose(position=self._bin_position) self._bin_ref_geom.set_default_state(position=self._bin_position) self.cam = rep.create.camera(position=(0, 0, 2), look_at=(0, 0, 0)) self.rp = rep.create.render_product(self.cam, resolution=(1024, 1024)) self.plane = rep.create.plane( scale=self._plane_scale, position=self._plane_position, rotation=self._plane_rotation, visible=False ) rep.randomizer.register(self.random_props) # rep.randomizer.register(self.random_sphere_lights) return async def setup_post_load(self): # interval : Number of frames to capture before switching. # When generating large datasets, increasing this interval will reduce time taken. # A good value to set is 10. with rep.trigger.on_frame(num_frames=50, interval=2): for n, f in PROPS.items(): self.random_props(f, n) # Create a writer and apply the augmentations to its corresponding annotators self._writer = rep.WriterRegistry.get("BasicWriter") print(f"Writing data to: {self._out_dir}") self._writer.initialize( output_dir=self._out_dir, rgb=True, bounding_box_2d_tight=True, ) # Attach render product to writer self._writer.attach([self.rp]) return
5,609
Python
40.25
120
0.648244
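Editor's note: a condensed sketch of the replicator wiring that setup_scene/setup_post_load build above, using only calls that already appear in the file; the asset URL placeholder and output_dir are illustrative, and the real example loops over the PROPS dictionary instead of a single asset:

import omni.replicator.core as rep

cam = rep.create.camera(position=(0, 0, 2), look_at=(0, 0, 0))
rp = rep.create.render_product(cam, resolution=(1024, 1024))
plane = rep.create.plane(scale=(0.24, 0.4, 1.0), position=(0.0, 0.0, 0.4), visible=False)

with rep.trigger.on_frame(num_frames=10, interval=2):
    # scatter a few instances of one prop over the hidden plane and randomize their pose
    instances = rep.randomizer.instantiate("<nucleus>/Isaac/Props/YCB/Axis_Aligned/010_potted_meat_can.usd",
                                           size=3, mode="scene_instance")
    with instances:
        rep.randomizer.scatter_2d(plane, check_for_collisions=True)
        rep.modify.pose(rotation=rep.distribution.uniform((-180, -180, -180), (180, 180, 180)))

writer = rep.WriterRegistry.get("BasicWriter")
writer.initialize(output_dir="grocery_data_out", rgb=True, bounding_box_2d_tight=True)
writer.attach([rp])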
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/FrankaDeformable/franka_deformable.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from omni.isaac.examples.base_sample import BaseSample # This extension has franka related tasks and controllers as well from omni.isaac.franka import Franka from omni.isaac.franka.controllers import PickPlaceController from omni.isaac.franka.tasks import PickPlace from omni.isaac.core.tasks import BaseTask import numpy as np from omni.isaac.core.materials.deformable_material import DeformableMaterial from omni.isaac.core.physics_context.physics_context import PhysicsContext from omni.physx.scripts import deformableUtils, physicsUtils from pxr import UsdGeom, Gf, UsdPhysics import omni.physx import omni.usd import omni class FrankaPlaying(BaseTask): #NOTE: we only cover here a subset of the task functions that are available, # checkout the base class for all the available functions to override. # ex: calculate_metrics, is_done..etc. def __init__(self, name): super().__init__(name=name, offset=None) self._goal_position = np.array([-0.3, -0.3, 0.0515 / 2.0]) self._task_achieved = False return def create_cube(self, stage, prim_path): # Create cube result, path = omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Cube") omni.kit.commands.execute("MovePrim", path_from=path, path_to=prim_path) omni.usd.get_context().get_selection().set_selected_prim_paths([], False) cube_mesh = UsdGeom.Mesh.Get(stage, prim_path) physicsUtils.set_or_add_translate_op(cube_mesh, translate=Gf.Vec3f(0.3, 0.3, 0.3)) # physicsUtils.set_or_add_orient_op(cube_mesh, orient=Gf.Quatf(0.707, 0.707, 0, 0)) physicsUtils.set_or_add_scale_op(cube_mesh, scale=Gf.Vec3f(0.04, 0.04, 0.04)) cube_mesh.CreateDisplayColorAttr([(1.0, 0.0, 0.0)]) return def setup_deformable_cube(self, stage, prim_path): # Apply PhysxDeformableBodyAPI and PhysxCollisionAPI to skin mesh and set parameter to default values deformableUtils.add_physx_deformable_body( stage, prim_path, collision_simplification=True, simulation_hexahedral_resolution=4, self_collision=False, ) # Create a deformable body material and set it on the deformable body deformable_material_path = omni.usd.get_stage_next_free_path(stage, "/World/deformableBodyMaterial", True) deformableUtils.add_deformable_body_material( stage, deformable_material_path, youngs_modulus=10000.0, poissons_ratio=0.49, damping_scale=0.0, dynamic_friction=0.5, ) self._cube_prim = stage.GetPrimAtPath(prim_path) physicsUtils.add_physics_material_to_prim(stage, self._cube_prim, prim_path) return # Here we setup all the assets that we care about in this task. 
def set_up_scene(self, scene): super().set_up_scene(scene) scene.add_default_ground_plane() stage = omni.usd.get_context().get_stage() self.create_cube(stage, "/World/cube") self.setup_deformable_cube(stage, "/World/cube") self._franka = scene.add(Franka(prim_path="/World/Fancy_Franka", name="fancy_franka")) return # Information exposed to solve the task is returned from the task through get_observations def get_observations(self): matrix: Gf.Matrix4d = omni.usd.get_world_transform_matrix(self._cube_prim) translate: Gf.Vec3d = matrix.ExtractTranslation() cube_position = np.array([translate[0], translate[1], translate[2]]) current_joint_positions = self._franka.get_joint_positions() observations = { self._franka.name: { "joint_positions": current_joint_positions, }, "deformable_cube": { "position": cube_position, "goal_position": self._goal_position } } return observations # Called before each physics step, # for instance we can check here if the task was accomplished by # changing the color of the cube once its accomplished def pre_step(self, control_index, simulation_time): return # Called after each reset, # for instance we can always set the gripper to be opened at the beginning after each reset # also we can set the cube's color to be blue def post_reset(self): self._franka.gripper.set_joint_positions(self._franka.gripper.joint_opened_positions) self._task_achieved = False return class FrankaDeformable(BaseSample): def __init__(self) -> None: super().__init__() return def _setup_simulation(self): self._scene = PhysicsContext() self._scene.set_solver_type("TGS") self._scene.set_broadphase_type("GPU") self._scene.enable_gpu_dynamics(flag=True) def setup_scene(self): world = self.get_world() self._setup_simulation() # We add the task to the world here world.add_task(FrankaPlaying(name="deformable_franka_task")) return async def setup_post_load(self): self._world = self.get_world() # The world already called the setup_scene from the task (with first reset of the world) # so we can retrieve the task objects self._franka = self._world.scene.get_object("fancy_franka") self._controller = PickPlaceController( name="pick_place_controller", gripper=self._franka.gripper, robot_articulation=self._franka, ) self._world.add_physics_callback("sim_step", callback_fn=self.physics_step) await self._world.play_async() return async def setup_post_reset(self): self._controller.reset() await self._world.play_async() return def physics_step(self, step_size): # Gets all the tasks observations current_observations = self._world.get_observations() actions = self._controller.forward( picking_position=current_observations["deformable_cube"]["position"], placing_position=current_observations["deformable_cube"]["goal_position"], current_joint_positions=current_observations["fancy_franka"]["joint_positions"], ) self._franka.apply_action(actions) if self._controller.is_done(): self._world.pause() return
6,929
Python
37.932584
114
0.65464
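Editor's note: the deformable cube in franka_deformable.py is not a rigid-body prim, so FrankaPlaying.get_observations reads its position from the USD world transform rather than from a rigid-prim view. A helper sketch mirroring that lookup (not part of the original file):

import numpy as np
import omni.usd
from pxr import Gf

def prim_world_position(prim):
    matrix: Gf.Matrix4d = omni.usd.get_world_transform_matrix(prim)
    translate: Gf.Vec3d = matrix.ExtractTranslation()
    return np.array([translate[0], translate[1], translate[2]])

# e.g. prim_world_position(stage.GetPrimAtPath("/World/cube")), assuming `stage` is the open USD stage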
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/WheeledRobotSummitO3Wheel/robotnik_summit.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.wheeled_robots.robots import WheeledRobot
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.physics_context.physics_context import PhysicsContext
from omni.isaac.core.utils.nucleus import get_assets_root_path, get_url_root
from omni.isaac.wheeled_robots.controllers.holonomic_controller import HolonomicController

import numpy as np
import carb


class RobotnikSummit(BaseSample):
    def __init__(self) -> None:
        super().__init__()

        carb.log_info("Check /persistent/isaac/asset_root/default setting")
        default_asset_root = carb.settings.get_settings().get("/persistent/isaac/asset_root/default")
        self._server_root = get_url_root(default_asset_root)

        # wheel models referenced from : https://git.openlogisticsfoundation.org/silicon-economy/simulation-model/o3dynsimmodel
        self._robot_path = self._server_root + "/Projects/RBROS2/WheeledRobot/Collected_summit_xl_omni_four/summit_xl_omni_four.usd"

        self._wheel_radius = np.array([0.127, 0.127, 0.127, 0.127])
        self._wheel_positions = np.array([
            [0.229, 0.235, 0.11],
            [0.229, -0.235, 0.11],
            [-0.229, 0.235, 0.11],
            [-0.229, -0.235, 0.11],
        ])
        self._wheel_orientations = np.array([
            [0.7071068, 0, 0, 0.7071068],
            [0.7071068, 0, 0, -0.7071068],
            [0.7071068, 0, 0, 0.7071068],
            [0.7071068, 0, 0, -0.7071068],
        ])
        self._mecanum_angles = np.array([
            -135.0,
            -45.0,
            -45.0,
            -135.0,
        ])
        self._wheel_axis = np.array([1, 0, 0])
        self._up_axis = np.array([0, 0, 1])
        return

    def setup_scene(self):
        world = self.get_world()
        world.scene.add_default_ground_plane()

        add_reference_to_stage(usd_path=self._robot_path, prim_path="/World/Summit")
        self._wheeled_robot = WheeledRobot(
            prim_path="/World/Summit/summit_xl_base_link",
            name="my_summit",
            wheel_dof_names=[
                "fl_joint",
                "fr_joint",
                "rl_joint",
                "rr_joint",
            ],
            create_robot=True,
            usd_path=self._robot_path,
            position=np.array([0, 0.0, 0.02]),
            orientation=np.array([1.0, 0.0, 0.0, 0.0]),
        )

        self._save_count = 0
        self._scene = PhysicsContext()
        self._scene.set_physics_dt(1 / 30.0)
        return

    async def setup_post_load(self):
        self._world = self.get_world()
        self._summit_controller = HolonomicController(
            name="holonomic_controller",
            wheel_radius=self._wheel_radius,
            wheel_positions=self._wheel_positions,
            wheel_orientations=self._wheel_orientations,
            mecanum_angles=self._mecanum_angles,
            wheel_axis=self._wheel_axis,
            up_axis=self._up_axis,
        )
        self._summit_controller.reset()
        self._wheeled_robot.initialize()

        self._world.add_physics_callback("sending_actions", callback_fn=self.send_robot_actions)
        return

    def send_robot_actions(self, step_size):
        self._save_count += 1

        wheel_action = None
        if self._save_count >= 0 and self._save_count < 150:
            wheel_action = self._summit_controller.forward(command=[0.5, 0.0, 0.0])
        elif self._save_count >= 150 and self._save_count < 300:
            wheel_action = self._summit_controller.forward(command=[-0.5, 0.0, 0.0])
        elif self._save_count >= 300 and self._save_count < 450:
            wheel_action = self._summit_controller.forward(command=[0.0, 0.5, 0.0])
        elif self._save_count >= 450 and self._save_count < 600:
            wheel_action = self._summit_controller.forward(command=[0.0, -0.5, 0.0])
        elif self._save_count >= 600 and self._save_count < 750:
            wheel_action = self._summit_controller.forward(command=[0.0, 0.0, 0.3])
        elif self._save_count >= 750 and self._save_count < 900:
            wheel_action = self._summit_controller.forward(command=[0.0, 0.0, -0.3])
        else:
            self._save_count = 0

        # guard against the wrap-around step, where no command is produced
        if wheel_action is not None:
            self._wheeled_robot.apply_wheel_actions(wheel_action)
        return

    async def setup_pre_reset(self):
        if self._world.physics_callback_exists("sim_step"):
            self._world.remove_physics_callback("sim_step")
        self._world.pause()
        return

    async def setup_post_reset(self):
        self._summit_controller.reset()
        await self._world.play_async()
        self._world.pause()
        return

    def world_cleanup(self):
        self._world.pause()
        return
5,294
Python
36.821428
132
0.604836
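Editor's note: the actual wheel speeds are computed internally by HolonomicController from the radii, positions, orientations, and mecanum angles passed above. For intuition, here is the textbook mecanum inverse kinematics for an X-configuration base with the same wheel order as the file (fl, fr, rl, rr); it is an illustration, not the controller's implementation:

import numpy as np

def mecanum_wheel_speeds(vx, vy, wz, r=0.127, lx=0.229, ly=0.235):
    L = lx + ly
    return np.array([
        (vx - vy - L * wz) / r,   # front-left
        (vx + vy + L * wz) / r,   # front-right
        (vx + vy - L * wz) / r,   # rear-left
        (vx - vy + L * wz) / r,   # rear-right
    ])

print(mecanum_wheel_speeds(0.5, 0.0, 0.0))  # pure forward: all four wheels spin equally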
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/URBinFilling/extension.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

import asyncio
import os

import omni.ui as ui
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.isaac.ui.ui_utils import btn_builder

from .bin_filling import BinFilling


class BinFillingExtension(BaseSampleExtension):
    def on_startup(self, ext_id: str):
        super().on_startup(ext_id)
        super().start_extension(
            menu_name="RoadBalanceEdu",
            submenu_name="ETRIDemo",
            name="UR10 Bin Filling",
            title="UR10 Bin Filling",
            doc_link="https://docs.omniverse.nvidia.com/isaacsim/latest/core_api_tutorials/tutorial_core_adding_manipulator.html",
            overview="This Example shows how to do bin filling using UR10 robot in Isaac Sim.\n It showcases a realistic surface gripper that breaks with heavy bin load.\nPress the 'Open in IDE' button to view the source code.",
            sample=BinFilling(),
            file_path=os.path.abspath(__file__),
            number_of_extra_frames=1,
        )
        self.task_ui_elements = {}
        frame = self.get_frame(index=0)
        self.build_task_controls_ui(frame)
        return

    def _on_fill_bin_button_event(self):
        asyncio.ensure_future(self.sample.on_fill_bin_event_async())
        self.task_ui_elements["Start Bin Filling"].enabled = False
        return

    def post_reset_button_event(self):
        self.task_ui_elements["Start Bin Filling"].enabled = True
        return

    def post_load_button_event(self):
        self.task_ui_elements["Start Bin Filling"].enabled = True
        return

    def post_clear_button_event(self):
        self.task_ui_elements["Start Bin Filling"].enabled = False
        return

    def build_task_controls_ui(self, frame):
        with frame:
            with ui.VStack(spacing=5):
                # Update the Frame Title
                frame.title = "Task Controls"
                frame.visible = True
                # renamed from `dict` to avoid shadowing the built-in
                btn_dict = {
                    "label": "Start Bin Filling",
                    "type": "button",
                    "text": "Start Bin Filling",
                    "tooltip": "Start Bin Filling",
                    "on_clicked_fn": self._on_fill_bin_button_event,
                }
                self.task_ui_elements["Start Bin Filling"] = btn_builder(**btn_dict)
                self.task_ui_elements["Start Bin Filling"].enabled = False
2,801
Python
38.464788
228
0.632988
kimsooyoung/rb_issac_tutorial/RoadBalanceEdu/URBinFilling/bin_filling.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

import h5py
import numpy as np

import omni.isaac.core.utils.numpy.rotations as rot_utils
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.universal_robots.controllers.pick_place_controller import PickPlaceController
from omni.isaac.universal_robots.tasks import BinFilling as BinFillingTask
from omni.isaac.core import SimulationContext
from omni.isaac.sensor import Camera


class BinFilling(BaseSample):
    def __init__(self) -> None:
        super().__init__()
        self._controller = None
        self._articulation_controller = None
        self._added_screws = False

        self._sim_time_list = []
        self._joint_positions = []
        self._joint_velocities = []
        self._camera1_img = []
        self._camera2_img = []
        self._camera3_img = []

    def setup_scene(self):
        world = self.get_world()
        self.simulation_context = SimulationContext()
        world.add_task(BinFillingTask(name="bin_filling"))
        self._save_count = 0

        self._camera1 = Camera(
            prim_path="/World/Scene/ur10/ee_link/ee_camera",
            # position=np.array([0.088, 0.0, 0.926]),
            translation=np.array([0.0, 0.0, -0.1]),
            frequency=30,
            resolution=(640, 480),
            orientation=rot_utils.euler_angles_to_quats(
                np.array([180.0, -30.0, 0.0]), degrees=True),
        )
        self._camera1.set_clipping_range(0.1, 1000000.0)
        self._camera1.set_focal_length(1.5)
        self._camera1.initialize()
        self._camera1.add_motion_vectors_to_frame()
        self._camera1.set_visibility(False)

        self._camera2 = Camera(
            prim_path="/World/side_camera",
            position=np.array([2.5, 0.0, 0.0]),
            # translation=np.array([0.0, 0.0, -0.1]),
            frequency=30,
            resolution=(640, 480),
            orientation=rot_utils.euler_angles_to_quats(
                np.array([0.0, 0.0, 180.0]), degrees=True),
        )
        self._camera2.set_focal_length(1.5)
        self._camera2.set_visibility(False)
        self._camera2.initialize()

        self._camera3 = Camera(
            prim_path="/World/front_camera",
            position=np.array([0.0, 2.0, 0.0]),
            # translation=np.array([0.0, 0.0, -0.1]),
            frequency=30,
            resolution=(640, 480),
            orientation=rot_utils.euler_angles_to_quats(
                np.array([0.0, 0.0, -90.0]), degrees=True),
        )
        self._camera3.set_focal_length(1.5)
        self._camera3.set_visibility(False)
        self._camera3.initialize()

        self._f = h5py.File('ur_bin_filling.hdf5', 'w')
        self._group_f = self._f.create_group("isaac_dataset")
        self._save_count = 0
        self._img_f = self._group_f.create_group("camera_images")
        return

    async def setup_post_load(self):
        self._ur10_task = self._world.get_task(name="bin_filling")
        self._task_params = self._ur10_task.get_params()
        self._my_ur10 = self._world.scene.get_object(self._task_params["robot_name"]["value"])
        self._controller = PickPlaceController(
            name="pick_place_controller",
            gripper=self._my_ur10.gripper,
            robot_articulation=self._my_ur10
        )
        self._articulation_controller = self._my_ur10.get_articulation_controller()
        return

    def _on_fill_bin_physics_step(self, step_size):
        self._camera1.get_current_frame()
        self._camera2.get_current_frame()
        self._camera3.get_current_frame()

        current_time = self.simulation_context.current_time
        current_joint_state = self._my_ur10.get_joints_state()
        current_joint_positions = current_joint_state.positions
        current_joint_velocities = current_joint_state.velocities

        if self._save_count % 100 == 0:
            self._sim_time_list.append(current_time)
            self._joint_positions.append(current_joint_positions)
            self._joint_velocities.append(current_joint_velocities)
            self._camera1_img.append(self._camera1.get_rgba()[:, :, :3])
            self._camera2_img.append(self._camera2.get_rgba()[:, :, :3])
            self._camera3_img.append(self._camera3.get_rgba()[:, :, :3])
            print("Collecting data...")

        observations = self._world.get_observations()
        actions = self._controller.forward(
            picking_position=observations[self._task_params["bin_name"]["value"]]["position"],
            placing_position=observations[self._task_params["bin_name"]["value"]]["target_position"],
            current_joint_positions=observations[self._task_params["robot_name"]["value"]]["joint_positions"],
            end_effector_offset=np.array([0, -0.098, 0.03]),
            end_effector_orientation=euler_angles_to_quat(np.array([np.pi, 0, np.pi / 2.0])),
        )
        # if not self._added_screws and self._controller.get_current_event() == 6 and not self._controller.is_paused():
        #     self._controller.pause()
        #     self._ur10_task.add_screws(screws_number=20)
        #     self._added_screws = True
        # if self._controller.is_done():
        #     self._world.pause()
        self._articulation_controller.apply_action(actions)
        self._save_count += 1

        if self._controller.is_done():
            self.save_data()
        return

    async def on_fill_bin_event_async(self):
        world = self.get_world()
        world.add_physics_callback("sim_step", self._on_fill_bin_physics_step)
        await world.play_async()
        return

    async def setup_pre_reset(self):
        world = self.get_world()
        if world.physics_callback_exists("sim_step"):
            world.remove_physics_callback("sim_step")
        self._controller.reset()
        self._added_screws = False
        return

    def world_cleanup(self):
        self._controller = None
        self._added_screws = False
        return

    def save_data(self):
        self._group_f.create_dataset("sim_time", data=self._sim_time_list, compression='gzip', compression_opts=9)
        self._group_f.create_dataset("joint_positions", data=self._joint_positions, compression='gzip', compression_opts=9)
        self._group_f.create_dataset("joint_velocities", data=self._joint_velocities, compression='gzip', compression_opts=9)
        self._img_f.create_dataset("ee_camera", data=self._camera1_img, compression='gzip', compression_opts=9)
        self._img_f.create_dataset("side_camera", data=self._camera2_img, compression='gzip', compression_opts=9)
        self._img_f.create_dataset("front_camera", data=self._camera3_img, compression='gzip', compression_opts=9)
        self._f.close()
        print("Data saved")

        self._save_count = 0
        self._world.pause()
        return
7,425
Python
38.291005
126
0.61037
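The bin_filling.py script above logs joint states and camera frames into an HDF5 file via h5py. The snippet below is a minimal read-back sketch, not part of the original extension; the file name `ur_bin_filling.hdf5` and the group/dataset names are taken from `save_data()`, while the commented array shapes are assumptions based on the (640, 480) camera resolution used in the script.

```python
# Minimal sketch for inspecting the HDF5 file written by bin_filling.py.
import h5py
import numpy as np

with h5py.File("ur_bin_filling.hdf5", "r") as f:
    group = f["isaac_dataset"]

    # Time series sampled every 100 physics steps by _on_fill_bin_physics_step()
    sim_time = np.array(group["sim_time"])                 # assumed shape: (N,)
    joint_positions = np.array(group["joint_positions"])   # assumed shape: (N, num_joints)
    joint_velocities = np.array(group["joint_velocities"])

    # RGB frames from the three cameras, stored under the "camera_images" group
    images = group["camera_images"]
    ee_frames = np.array(images["ee_camera"])       # assumed shape: (N, 480, 640, 3)
    side_frames = np.array(images["side_camera"])
    front_frames = np.array(images["front_camera"])

    print(f"{len(sim_time)} samples, first timestamp {sim_time[0]:.3f} s")
```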
kimsooyoung/rb_issac_tutorial/config/extension.toml
[core]
reloadable = true
order = 0

[package]
version = "1.0.0"
category = "Simulation"
title = "RoadBalanceEdu"
description = "NVIDIA Isaac Sim tutorial Extension"
authors = ["RoadBalanceEdu"]
repository = ""
keywords = []
changelog = "docs/CHANGELOG.md"
readme = "docs/README.md"
preview_image = "data/preview.png"
icon = "data/icon.png"

[dependencies]
"omni.kit.uiapp" = {}
"omni.isaac.ui" = {}
"omni.isaac.core" = {}

[[python.module]]
name = "RoadBalanceEdu.HelloWorld"

[[python.module]]
name = "RoadBalanceEdu.HelloRobot"

[[python.module]]
name = "RoadBalanceEdu.HelloManipulator"

[[python.module]]
name = "RoadBalanceEdu.HelloMultiRobot"

[[python.module]]
name = "RoadBalanceEdu.HelloMultiTask"

[[python.module]]
name = "RoadBalanceEdu.HelloCamera"

[[python.module]]
name = "RoadBalanceEdu.HelloLight"

[[python.module]]
name = "RoadBalanceEdu.HelloDeformable"

[[python.module]]
name = "RoadBalanceEdu.FootEnv"

[[python.module]]
name = "RoadBalanceEdu.FrankaNuts"

[[python.module]]
name = "RoadBalanceEdu.FrankaNutsTable"

[[python.module]]
name = "RoadBalanceEdu.FrankaDeformable"

[[python.module]]
name = "RoadBalanceEdu.URBinFilling"

[[python.module]]
name = "RoadBalanceEdu.URPalletizing"

[[python.module]]
name = "RoadBalanceEdu.DingoLibrary"

[[python.module]]
name = "RoadBalanceEdu.FrankaFactory"

[[python.module]]
name = "RoadBalanceEdu.ManipGripperControl"

[[python.module]]
name = "RoadBalanceEdu.ManipFollowTarget"

[[python.module]]
name = "RoadBalanceEdu.ManipPickandPlace"

[[python.module]]
name = "RoadBalanceEdu.ManipLULA"

[[python.module]]
name = "RoadBalanceEdu.MirobotGripperControl"

[[python.module]]
name = "RoadBalanceEdu.MirobotFollowTarget"

[[python.module]]
name = "RoadBalanceEdu.MirobotPickandPlace"

[[python.module]]
name = "RoadBalanceEdu.ManipURGripper"

[[python.module]]
name = "RoadBalanceEdu.MirobotPickandPlaceROS2"

[[python.module]]
name = "RoadBalanceEdu.SurfaceGripper"

[[python.module]]
name = "RoadBalanceEdu.WheeledRobotLimoDiff"

[[python.module]]
name = "RoadBalanceEdu.WheeledRobotLimoDiffROS2"

[[python.module]]
name = "RoadBalanceEdu.WheeledRobotLimoAckermannROS2"

[[python.module]]
name = "RoadBalanceEdu.WheeledRobotLimoAckermannTwistROS2"

[[python.module]]
name = "RoadBalanceEdu.WheeledRobotsKaya"

[[python.module]]
name = "RoadBalanceEdu.WheeledRobotSummit"

[[python.module]]
name = "RoadBalanceEdu.WheeledRobotSummitO3Wheel"

[[python.module]]
name = "RoadBalanceEdu.WheeledRobotSummitO3WheelROS2"

#[[python.module]]
#name = "RoadBalanceEdu.ReplicatorCubeRandomRotation"

#[[python.module]]
#name = "RoadBalanceEdu.ReplicatorSpamRandomPose"

#[[python.module]]
#name = "RoadBalanceEdu.ReplicatorScatter2D"

#[[python.module]]
#name = "RoadBalanceEdu.ReplicatorBinwithStuffs"

#[[python.module]]
#name = "RoadBalanceEdu.ReplicatorFactory"

#[[python.module]]
#name = "RoadBalanceEdu.ReplicatorFactoryDemo"

# [[python.module]]
# name = "RoadBalanceEdu.ReplicatorFactoryDemoROS2"

[[python.module]]
name = "RoadBalanceEdu.ETRIusbA"

[[python.module]]
name = "RoadBalanceEdu.ETRIcable"

[[python.module]]
name = "RoadBalanceEdu.ETRIUR10"

[[python.module]]
name = "RoadBalanceEdu.ETRIFrankaGripper"

[[python.module]]
name = "RoadBalanceEdu.ETRIUR102F85"

#[[python.module]]
#name = "RoadBalanceEdu.FrankaFollowTarget"

#[[python.module]]
#name = "RoadBalanceEdu.FrankaCabinet"

[[python.module]]
name = "RoadBalanceEdu.LimoDiffROS2"

[[python.module]]
name = "RoadBalanceEdu.LimoAckermannROS2"

[[python.module]]
name = "RoadBalanceEdu.SimpleRobotFollowTarget"
3,548
TOML
19.164773
58
0.755637
kimsooyoung/rb_issac_tutorial/docs/CHANGELOG.md
# Changelog

## [0.1.0] - 2024-02-23

### Added

- Initial version of RoadBalanceEdu Extension
95
Markdown
10.999999
45
0.673684
kimsooyoung/rb_issac_tutorial/docs/README.md
# Usage

To enable this extension, run Isaac Sim with the flags `--ext-folder {path_to_ext_folder} --enable {ext_directory_name}`, or enable it through the Extensions Manager:

- Clone this repo into your preferred location.
- Navigate to `Window` -> `Extensions` in the toolbar to open the Extensions Manager.
- Click the hamburger icon in the Extensions Manager, then `Settings` in the sub-menu, and add the path to the folder you created for your user extensions.
- Find your extension in the Extensions Manager under `Third Party Extensions` and enable it. It will now appear in the toolbar.

> Please refer to this link: https://docs.omniverse.nvidia.com/isaacsim/latest/introductory_tutorials/tutorial_intro_workflows.html#isaac-sim-extension-workflow

<p align="center">
    <img src="./img/demo_img.png" height="200">
</p>
781
Markdown
54.857139
159
0.759283
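As a concrete illustration of the flag-based workflow mentioned in the README above, here is a sketch of a launch command. The Isaac Sim install path, the clone location, and the extension directory name (`RoadBalanceEdu`) are assumptions for illustration only; substitute your own paths and the actual extension directory name.

```bash
# Minimal launch sketch (hypothetical paths -- adjust to your setup).
# --ext-folder points at the folder containing the extension,
# --enable takes the extension directory name (assumed here to be RoadBalanceEdu).
ISAAC_SIM_PATH=~/.local/share/ov/pkg/isaac_sim-2023.1.1
"$ISAAC_SIM_PATH/isaac-sim.sh" \
  --ext-folder ~/workspace/rb_issac_tutorial \
  --enable RoadBalanceEdu
```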
daniel-kun/omni/README.md
**INFO** _My efforts around Omni have been on hold for a long time and will be for the foreseeable future - mostly because my kids keep me busy :-) In the meantime, a few other projects have spawned and made progress that I would like to mention:_

* [isomorƒ](https://isomorf.io/)
* [Unison](http://unisonweb.org/)
* [Expressions of Change](http://www.expressionsofchange.org/)
* [Codeflow](http://codeflow.co/)
* [Luna](http://www.luna-lang.org/)

# The Omni Programming Language Manifest

This manifest declares the goals that The Omni Programming Language is designed to reach.

Omni is meant to provide a new way of coding: a way of coding where less time is spent on text editing and more time is spent on engineering, designing, defining and changing logic and architecture, assuring quality, and documenting. In short, Omni frees the programmer of the unnecessary tasks that the classic way of writing programs in text files imposes on them, to let you get things done.

# Goals

## Productivity

Higher productivity and quality for programmers

* Let programmers focus on coding
* Let programmers create higher quality and more robust code, with less effort
* Integrated backlogs and work tasks

## Agility

* Make wide-scale code changes *safe*, fast and easy
* Make code easier to refactor
* Make code easier to be statically analysed
* Make code easier to be automatically manipulated

## Quality

* Integrated unit tests
* Integrated test director
* Integrated documentation system

## Technology

* Code is not stored in text files, but in a database
* This enables Omni to present code pieces independent of their physical storage
* Declarative and imperative code model in the same language
* Deterministic Garbage Collector via Automatic Reference Counting
* API to access the code model (for coded refactoring, analysis, metrics, reports, etc.)
* Built-in, compile-time-safe multithreading

# Further readings

- [Motivation](Motivation.md "Motivation")
- Technology (Coming soon)
- Vision (Coming soon)

# Status quo

Update: 15.05.2017

After 2 years of hibernation, I returned to working on this project. I took the time to follow current development trends, watch the hilarious rise and fall of JavaScript languages / web frontend frameworks, and contemplate whether I should build Omni as an on-premise desktop software or an online SaaS. I finally concluded that a web-based approach is more suitable to the current direction of the developer ecosystem and communities.

I re-started work on Omni using [Elm](http://elm-lang.org/) as a front-end language/framework and was somewhat surprised: [The Elm Architecture](https://guide.elm-lang.org/architecture/) will actually make implementing Omni *much easier* than my last approach using WPF, and even more robust! I am very pleased with what I have seen and learned so far and am currently investigating backend solutions. (I tried [ASP.NET Core](https://www.asp.net/), with which I already have experience, and am installing Scala, sbt and [Play](https://www.playframework.com/) right now. I'd like to have a "mainstream" functional language in the backend, to make contributions more accessible.) I'll post an update as soon as I have a prototype ready that is similar to what you can see in WPF below.

Update: 23.06.2015

More work on the input system prototype; it looks good so far. Well, not exactly pretty, but the mechanics are sound. Look at this awesome stuff:

![](media/OmniInputMathTermsWithFractal.gif)

Update: 10.06.2015

The progress on the prototype is coming along nicely. I chose to implement the prototype in C#, because a) I am much faster at GUI coding in C# + WPF than in C++ with Qt (but can't use it for the final product because it is not cross-platform), and b) I will not be able to copy+paste quick&dirty code from the prototype into the production code. Here's a small teaser:

![](media/OmniPrototypeScreenshot-v1.png)

Just for kicks, I hooked up two views that display the same code model with different templates. This way you can synchronously edit the same code in C-style and in Lisp-style. Feels awesome!

Update: 02.06.2015

I have made progress on a prototype for the input architecture. This is not trivial, but I might be able to show a small sneak preview soon.

Update: 13.05.2015

I haven't been working much on the code base lately, because my primary goal was to get portability in place before driving features. I have ported the build system to CMake, and Omni now builds on Win32 and Linux on x86, x86_64 and ARM v7 (using a Raspberry Pi as a development platform). There is even a little bit of GUI up and running (on all platforms). I have built a few input fields for literal expressions and variable definitions. I am not very happy with the result, though - both the code architecture and the look&feel. So I decided to do some backend work, which currently consists mainly of research, hence the few code changes. I am trying to integrate either static code analysis or automatic unit test generation into Omni, so I am currently looking around to see what has already been done. My plan is to update the Wikipedia article to include a comprehensive feature comparison of C++ static code analysis tools.

Omni is going slow-paced, because I don't find much time to work on it. But this gives me the time to build up the ideas and the big picture in my mind before hacking away. :-)

Update: 04.11.2014

Currently, the code model exists for basic structures such as functions, variables, if/for/while/etc. and can be compiled to binaries on Win32 and Linux. A flexible meta-information system that allows you to attach any kind of information to entity types (expressions and statements) is finished and used to combine the code model and the UI. There are some little prototypes for the UI to edit and view elements such as literals and variable declarations.

The next step will be a "free-form" input field that automatically detects the type of entity that is being written and auto-transforms into that entity's special UI. E.g. when I write "va", it auto-expands to the UI for variable declarations, looking like "var [variable-name] = [init-expression]".

In parallel, I am writing the docs "Motivation" and "Technology". "Vision" is to follow.
6,246
Markdown
62.744897
805
0.779379