diff --git a/example/vector_retargeting/realtime_capture_show.py b/example/vector_retargeting/realtime_capture_show.py
new file mode 100644
index 0000000..a6ed248
--- /dev/null
+++ b/example/vector_retargeting/realtime_capture_show.py
@@ -0,0 +1,190 @@
+from pathlib import Path
+import multiprocessing
+import time
+import cv2
+from loguru import logger
+import numpy as np
+import sapien
+from sapien.asset import create_dome_envmap
+from sapien.utils import Viewer
+
+from dex_retargeting.constants import RobotName, RetargetingType, HandType, get_default_config_path
+from dex_retargeting.retargeting_config import RetargetingConfig
+from dex_retargeting.seq_retarget import SeqRetargeting
+from single_hand_detector import SingleHandDetector
+
+
+def start_retargeting(queue: multiprocessing.Queue, robot_dir: str, config_path: str, robot_name: str):
+    RetargetingConfig.set_default_urdf_dir(str(robot_dir))
+    logger.info(f"Start retargeting with config {config_path}")
+    retargeting = RetargetingConfig.load_from_file(config_path).build()
+    detector = SingleHandDetector(hand_type="Right", selfie=False)
+
+    # Init metadata for the viewer UI
+    meta_data = {
+        "dof": len(retargeting.optimizer.robot.dof_joint_names),
+        "joint_names": retargeting.optimizer.robot.dof_joint_names,
+    }
+
+    headless = False
+    if not headless:
+        sapien.render.set_viewer_shader_dir("default")
+        sapien.render.set_camera_shader_dir("default")
+    else:
+        sapien.render.set_viewer_shader_dir("rt")
+        sapien.render.set_camera_shader_dir("rt")
+        sapien.render.set_ray_tracing_samples_per_pixel(16)
+        sapien.render.set_ray_tracing_path_depth(8)
+        sapien.render.set_ray_tracing_denoiser("oidn")
+
+    config = RetargetingConfig.load_from_file(config_path)
+
+    # Scene setup
+    scene = sapien.Scene()
+
+    # Ground
+    render_mat = sapien.render.RenderMaterial()
+    render_mat.base_color = [0.06, 0.08, 0.12, 1]
+    render_mat.metallic = 0.0
+    render_mat.roughness = 0.9
+    render_mat.specular = 0.8
+    scene.add_ground(-0.2, render_material=render_mat, render_half_size=[1000, 1000])
+
+    # Lighting
+    scene.add_directional_light(np.array([1, 1, -1]), np.array([3, 3, 3]))
+    scene.add_point_light(np.array([2, 2, 2]), np.array([2, 2, 2]), shadow=False)
+    scene.add_point_light(np.array([2, -2, 2]), np.array([2, 2, 2]), shadow=False)
+    scene.set_environment_map(create_dome_envmap(sky_color=[0.2, 0.2, 0.2], ground_color=[0.2, 0.2, 0.2]))
+    scene.add_area_light_for_ray_tracing(sapien.Pose([2, 1, 2], [0.707, 0, 0.707, 0]), np.array([1, 1, 1]), 5, 5)
+
+    # Camera
+    cam = scene.add_camera(name="Cheese!", width=600, height=600, fovy=1, near=0.1, far=10)
+    cam.set_local_pose(sapien.Pose([0.50, 0, 0.0], [0, 0, 0, -1]))
+
+    viewer = Viewer()
+    viewer.set_scene(scene)
+    viewer.control_window.show_origin_frame = False
+    viewer.control_window.move_speed = 0.01
+    viewer.control_window.toggle_camera_lines(False)
+    viewer.set_camera_pose(cam.get_local_pose())
+
+    # Load the robot and set it to a good pose for the camera
+    loader = scene.create_urdf_loader()
+    filepath = Path(config.urdf_path)
+    # Note: the robot_name argument is shadowed here; scale and pose below are keyed on the URDF filename
+    robot_name = filepath.stem
+    loader.load_multiple_collisions_from_file = True
+    if "ability" in robot_name:
+        loader.scale = 1.5
+    elif "dclaw" in robot_name:
+        loader.scale = 1.25
+    elif "allegro" in robot_name:
+        loader.scale = 1.4
+    elif "shadow" in robot_name:
+        loader.scale = 0.9
+    elif "bhand" in robot_name:
+        loader.scale = 1.5
+    elif "leap" in robot_name:
+        loader.scale = 1.4
+    elif "svh" in robot_name:
+        loader.scale = 1.5
+
+    # Prefer the glb variant of the URDF for rendering
+    if "glb" not in robot_name:
+        filepath = str(filepath).replace(".urdf", "_glb.urdf")
+    else:
+        filepath = str(filepath)
+
+    robot = loader.load(filepath)
+
+    if "ability" in robot_name:
+        robot.set_pose(sapien.Pose([0, 0, -0.15]))
+    elif "shadow" in robot_name:
+        robot.set_pose(sapien.Pose([0, 0, -0.2]))
+    elif "dclaw" in robot_name:
+        robot.set_pose(sapien.Pose([0, 0, -0.15]))
+    elif "allegro" in robot_name:
+        robot.set_pose(sapien.Pose([0, 0, -0.05]))
+    elif "bhand" in robot_name:
+        robot.set_pose(sapien.Pose([0, 0, -0.2]))
+    elif "leap" in robot_name:
+        robot.set_pose(sapien.Pose([0, 0, -0.15]))
+    elif "svh" in robot_name:
+        robot.set_pose(sapien.Pose([0, 0, -0.13]))
+
+    # Different robot loaders may order joints differently, so map retargeting output to SAPIEN order
+    sapien_joint_names = [joint.get_name() for joint in robot.get_active_joints()]
+    retargeting_joint_names = meta_data["joint_names"]
+    retargeting_to_sapien = np.array([retargeting_joint_names.index(name) for name in sapien_joint_names]).astype(int)
+
+    for _ in range(2):
+        viewer.render()
+
+    while True:
+        rgb = queue.get()
+        if rgb is None:
+            time.sleep(0.1)
+            continue
+
+        _, joint_pos, _, _ = detector.detect(rgb)
+        logger.info("joint pos {}", joint_pos)
+        if joint_pos is None:
+            continue
+
+        retargeting_type = retargeting.optimizer.retargeting_type
+        indices = retargeting.optimizer.target_link_human_indices
+        if retargeting_type == "POSITION":
+            ref_value = joint_pos[indices, :]
+        else:
+            origin_indices = indices[0, :]
+            task_indices = indices[1, :]
+            ref_value = joint_pos[task_indices, :] - joint_pos[origin_indices, :]
+        qpos = retargeting.retarget(ref_value)
+        logger.info(f"qpos {qpos}")
+        robot.set_qpos(qpos[retargeting_to_sapien])
+
+        for _ in range(2):
+            viewer.render()
+
+
+def produce_frame(queue: multiprocessing.Queue):
+    cap = cv2.VideoCapture(0)
+    while cap.isOpened():
+        success, image = cap.read()
+        if not success:
+            continue
+        # Keep the BGR frame for display; the detector expects RGB
+        frame = image
+        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        queue.put(image)
+        time.sleep(0.2)
+        cv2.imshow('demo', frame)
+
+        if cv2.waitKey(1) & 0xFF == ord('q'):
+            break
+
+
+def main():
+    robot_name: RobotName = RobotName.allegro
+    retargeting_type: RetargetingType = RetargetingType.vector
+    hand_type: HandType = HandType.right
+
+    config_path = get_default_config_path(robot_name, retargeting_type, hand_type)
+    robot_dir = Path(__file__).absolute().parent.parent.parent / "assets" / "robots" / "hands"
+
+    queue = multiprocessing.Queue(maxsize=1000)
+    producer_process = multiprocessing.Process(target=produce_frame, args=(queue,))
+    consumer_process = multiprocessing.Process(target=start_retargeting, args=(queue, str(robot_dir), str(config_path), robot_name))
+
+    producer_process.start()
+    consumer_process.start()
+
+    producer_process.join()
+    consumer_process.join()
+    time.sleep(5)
+
+    print("done")
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/setup.py b/setup.py
index ed97489..8d1ecf5 100644
--- a/setup.py
+++ b/setup.py
@@ -25,6 +25,7 @@
     "anytree",
     "pyyaml",
     "lxml",
+    "loguru",
 ]
 
 # Check whether you have torch installed