diff --git a/wild_visual_navigation/image_projector/image_projector.py b/wild_visual_navigation/image_projector/image_projector.py
index 8024e912..7c0286cf 100644
--- a/wild_visual_navigation/image_projector/image_projector.py
+++ b/wild_visual_navigation/image_projector/image_projector.py
@@ -193,7 +193,7 @@ def project_and_render(
         return self.masks, image_overlay, projected_points, valid_points

     def project_and_render_on_map(
-        self, pose_base_in_world: torch.tensor, points: torch.tensor, colors: torch.tensor, map_resolution: float, map_size: int, image: torch.tensor = None
+        self, pose_base_in_world: torch.tensor, points: torch.tensor, colors: torch.tensor, map_resolution: float, map_size: int, center_pose: torch.tensor = None
     ):
         """Projects the points and returns an image with the projection

@@ -214,16 +214,23 @@ def project_and_render_on_map(
         H = map_size
         W = map_size
         self.masks = torch.zeros((B, C, H, W), dtype=torch.float32, device=self.camera.camera_matrix.device)
-        image_overlay = image

         T_BW = pose_base_in_world.inverse()

         # Convert from fixed to base frame
         points_B = transform_points(T_BW, points)

+        # Convert from last node base frame to center node base frame
+        if center_pose is not None:
+            T_BW_relative = center_pose
+            points_B = transform_points(T_BW_relative, points_B)
+
         # Remove z dimension
         # TODO: project footprint on gravity aligned plane
         flat_points = points_B[:, :, :-1] # (B, N, 2)

+        # Flip x axis
+        # flat_points[:, :, 0] = -flat_points[:, :, 0]
+
         # Center index of flat_points for first dimensions
         # Hack to center points
         # center_patch = flat_points[flat_points.shape[0] // 2, :, :]
@@ -237,16 +244,10 @@ def project_and_render_on_map(
         # Fill the mask
         self.masks = draw_convex_polygon(self.masks, flat_points, colors)

-        # Draw on image (if applies)
-        if image is not None:
-            if len(image.shape) != 4:
-                image = image[None]
-            image_overlay = draw_convex_polygon(image, flat_points, colors)
-
         # Return torch masks
         self.masks[self.masks == 0.0] = torch.nan
-        return self.masks, image_overlay
+        return self.masks

     def resize_image(self, image: torch.tensor):
         return self.image_crop(image)
diff --git a/wild_visual_navigation/traversability_estimator/traversability_estimator.py b/wild_visual_navigation/traversability_estimator/traversability_estimator.py
index d9176c88..c4c1d8b9 100644
--- a/wild_visual_navigation/traversability_estimator/traversability_estimator.py
+++ b/wild_visual_navigation/traversability_estimator/traversability_estimator.py
@@ -357,11 +357,15 @@ def add_proprio_node(self, pnode: ProprioceptionNode, projection_mode: str = "im
        if (not hasattr(last_mission_node, "supervision_mask")) or (last_mission_node.supervision_mask is None):
            return False

-        # Get all mission nodes within a range
        mission_nodes = self._mission_graph.get_nodes_within_radius_range(
-            last_mission_node, 0, self._proprio_graph.max_distance, metric="dijkstra" # pose or dijkstra
+            last_mission_node, 0, self._proprio_graph.max_distance, metric="pose" # pose or dijkstra
        )
+        center_nodes = self._mission_graph.get_nodes_within_radius_range(
+            last_mission_node, 0, self._proprio_graph.max_distance / 2, metric="pose" # pose or dijkstra
+        )
+        center_node = center_nodes[0]
+
        if len(mission_nodes) < 1:
            return False

@@ -378,10 +382,6 @@ def add_proprio_node(self, pnode: ProprioceptionNode, projection_mode: str = "im
        )
        pose_camera_in_world = torch.eye(4, device=self._device).repeat(B, 1, 1)
        pose_base_in_world = torch.eye(4, device=self._device).repeat(B, 1, 1)
-        # pose_pc_in_base = torch.eye(4, device=self._device).repeat(B, 1, 1)
-        # pose_pc_in_world = torch.eye(4, device=self._device).repeat(B, 1, 1)
-        pose_pc_in_base = {}
-        pose_pc_in_world = {}

        H = last_mission_node.image_projector.camera.height
        W = last_mission_node.image_projector.camera.width
@@ -397,6 +397,9 @@ def add_proprio_node(self, pnode: ProprioceptionNode, projection_mode: str = "im
            else:
                supervision_masks[i] = mnode.supervision_mask # Getting all the existing supervision masks

+        center_pose = pose_base_in_world[-1].to("cpu").inverse() @ center_node.pose_base_in_world.to("cpu")
+        center_pose = center_pose.to(self._device).repeat(B, 1, 1)
+
        im = ImageProjector(K, H, W)

        map_resolution = self._map_resolution
@@ -405,7 +408,7 @@ def add_proprio_node(self, pnode: ProprioceptionNode, projection_mode: str = "im
        if projection_mode == "image":
            mask, _, _, _ = im.project_and_render(pose_camera_in_world, footprints, color) # Generating the new supervision mask to add
        elif projection_mode == "map":
-            mask, _ = im.project_and_render_on_map(pose_base_in_world, footprints, color, map_resolution, map_size)
+            mask = im.project_and_render_on_map(pose_base_in_world, footprints, color, map_resolution, map_size, center_pose=center_pose)

        # Update traversability
        # mask = mask * pnode.traversability
@@ -427,16 +430,6 @@ def add_proprio_node(self, pnode: ProprioceptionNode, projection_mode: str = "im
                str(mnode.timestamp).replace(".", "_") + ".pt",
            ))

-            # Write as grid map msg and save as rosbag
-            # mask_torch = mask.cpu().numpy()[np.newaxis, ...].astype(np.uint8)
-            # mask_msg = self.torch_array_to_grid_map(mask_torch, res=0.1, layers=["target"],
-            #                                         timestamp=rospy.Time.from_sec(mnode.timestamp),
-            #                                         reference_frame="odom", x=0, y=0)
-            # # Hack to also publish a clock and record a new bag
-            # # print(rospy.Time.from_sec(mnode.timestamp))
-            # self.pub_clock.publish(rospy.Time.from_sec(mnode.timestamp))
-            # self.pub_grid_map.publish(mask_msg)
-
            # Save mask as jpg
            mask_jpg = mask.cpu().numpy().astype(np.uint8) * 255
            cv2.imwrite(os.path.join(
diff --git a/wild_visual_navigation_ros/config/elevation_mapping_cupy/anymal_parameters.yaml b/wild_visual_navigation_ros/config/elevation_mapping_cupy/anymal_parameters.yaml
index e43278b9..eb6d336e 100644
--- a/wild_visual_navigation_ros/config/elevation_mapping_cupy/anymal_parameters.yaml
+++ b/wild_visual_navigation_ros/config/elevation_mapping_cupy/anymal_parameters.yaml
@@ -1,5 +1,5 @@
 #### Basic parameters ########
-resolution: 0.04 # resolution in m.
+resolution: 0.1 # resolution in m.
 map_length: 8 # map's size in m.
 sensor_noise_factor: 0.05 # point's noise is sensor_noise_factor*z^2 (z is distance from sensor).
 mahalanobis_thresh: 2.0 # points outside this distance are outliers.
diff --git a/wild_visual_navigation_ros/config/wild_visual_navigation/default.yaml b/wild_visual_navigation_ros/config/wild_visual_navigation/default.yaml
index a4b76b74..52020c89 100644
--- a/wild_visual_navigation_ros/config/wild_visual_navigation/default.yaml
+++ b/wild_visual_navigation_ros/config/wild_visual_navigation/default.yaml
@@ -24,15 +24,15 @@
 base_frame: base
 footprint_frame: footprint

 # Robot size
-robot_length: 0.8
-robot_width: 0.4
-robot_height: 0.3
+robot_length: 0.8 # 0.8
+robot_width: 0.4 # 0.4
+robot_height: 0.3 # 0.3

 # Robot specs
 robot_max_velocity: 0.8

 # Traversability estimation params
-traversability_radius: 7.0 # meters
+traversability_radius: 10.0 # meters
 image_graph_dist_thr: 0.2 # meters
 proprio_graph_dist_thr: 0.1 # meters
 network_input_image_height: 480
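
Editor's note (not part of the patch): after this change, project_and_render_on_map takes an optional center_pose, a (B, 4, 4) batch of homogeneous transforms that re-expresses the footprint points in a chosen "center" node's base frame, and it returns only the mask tensor; the image-overlay argument, branch, and return value are gone. Below is a minimal usage sketch mirroring the new code in add_proprio_node. All tensor values (K, poses, footprints, map_resolution, map_size) are illustrative placeholders, the import path is assumed from the package layout, and picking the middle node as "center" is purely illustrative.

    import torch

    from wild_visual_navigation.image_projector import ImageProjector  # assumed import path

    B = 10                                     # batch of mission nodes (placeholder)
    K = torch.eye(4).repeat(B, 1, 1)           # placeholder camera intrinsics
    H, W = 480, 640                            # placeholder image size

    im = ImageProjector(K, H, W)

    pose_base_in_world = torch.eye(4).repeat(B, 1, 1)  # placeholder per-node base poses
    footprints = torch.rand(B, 4, 3)                   # one 4-point footprint polygon per node
    colors = torch.ones(B, 3)                          # polygon fill color per node

    # As in add_proprio_node: express the chosen center node's base frame relative
    # to the last node's base frame, then broadcast it over the batch.
    center_pose = pose_base_in_world[-1].inverse() @ pose_base_in_world[B // 2]
    center_pose = center_pose.repeat(B, 1, 1)

    # New signature: a single return value. Cells that were never drawn are torch.nan,
    # since the method replaces exact zeros with nan before returning.
    masks = im.project_and_render_on_map(
        pose_base_in_world, footprints, colors, map_resolution=0.1, map_size=80, center_pose=center_pose
    )

Because undrawn cells are now nan rather than zero, downstream consumers should test them with torch.isnan(masks) instead of comparing against 0.0.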