diff --git a/opencda/core/common/vehicle_manager.py b/opencda/core/common/vehicle_manager.py index 108dfafcd..869d5d023 100644 --- a/opencda/core/common/vehicle_manager.py +++ b/opencda/core/common/vehicle_manager.py @@ -26,6 +26,9 @@ from opencda.core.common.data_dumper import DataDumper from opencda.customize.platooning.platooning_behavior_agent import PlatooningBehaviorAgentExtended from opencda.core.application.platooning.intruder_behavior_agent import IntruderBehaviorAgent +import matplotlib.pyplot as plt +import numpy as np +import time class VehicleManager(object): @@ -150,6 +153,7 @@ def __init__( cav_world.update_vehicle_manager(self) + # self.msvan3tAgent = Msvan3tAgent(cav_world, self.vid, self.perception_manager, self.localizer, self.carla_map, self.vehicle) def set_destination( @@ -239,6 +243,8 @@ def run_step(self, target_speed=None, target_acceleration=None): control = self.controller.run_step(target_speed, target_pos, target_acceleration) + + # dump data if self.data_dumper: self.data_dumper.run_step(self.perception_manager, diff --git a/opencda/core/sensing/perception/o3d_lidar_libs.py b/opencda/core/sensing/perception/o3d_lidar_libs.py index 5abc98401..483e2981a 100644 --- a/opencda/core/sensing/perception/o3d_lidar_libs.py +++ b/opencda/core/sensing/perception/o3d_lidar_libs.py @@ -17,6 +17,8 @@ import opencda.core.sensing.perception.sensor_transformation as st from opencda.core.sensing.perception.obstacle_vehicle import \ is_vehicle_cococlass, ObstacleVehicle +from opencda.core.sensing.perception.obstacle_pedestrian import \ + ObstacleVRU from opencda.core.sensing.perception.static_obstacle import StaticObstacle VIRIDIS = np.array(cm.get_cmap('plasma').colors) @@ -95,6 +97,7 @@ def o3d_visualizer_init(actor_id): Initialize open3d visualizer. 
""" + # o3d.visualization.draw_geometries([o3d.geometry.TriangleMesh.create_coordinate_frame()]) #test vis = o3d.visualization.Visualizer() vis.create_window(window_name=str(actor_id), width=800, @@ -102,8 +105,12 @@ def o3d_visualizer_init(actor_id): left=480, top=270) vis.get_render_option().background_color = [0, 0, 0] - vis.get_render_option().point_size = 1 + vis.get_render_option().point_size = 0.5 vis.get_render_option().show_coordinate_frame = True + vis.get_view_control().camera_local_translate(10, 0, 0) + # ctr.set_zoom(5) + # ctr.set_constant_z_near(5) + # ctr.scale(2) return vis @@ -140,7 +147,7 @@ def o3d_visualizer_show(vis, count, point_cloud, objects, LDM=False): LDM_geometries = [] for key, object_list in objects.items(): # we only draw vehicles for now - if key != 'vehicles': + if key != 'vehicles' and key != 'VRU': continue for object_ in object_list: if LDM is True: @@ -173,7 +180,7 @@ def o3d_visualizer_show(vis, count, point_cloud, objects, LDM=False): time.sleep(0.001) for key, object_list in objects.items(): - if key != 'vehicles': + if key != 'vehicles' and key != 'VRU': continue for object_ in object_list: if object_.o3d_obb is not None: @@ -219,46 +226,46 @@ def o3d_visualizer_showLDM(vis, count, point_cloud, objects, groundTruth): LDM_geometries = [] for key, object_list in objects.items(): # we only draw vehicles for now - if key != 'vehicles': + if key != 'vehicles' and key != 'VRU': continue for object_ in object_list: if object_.perception.line_set is not None: geometry = object_.perception.line_set - if object_.connected: + if not object_.detected: colors = [[0, 1, 0] for _ in range(12)] geometry.colors = o3d.utility.Vector3dVector(colors) - elif not object_.connected and object_.onSight and object_.tracked: + elif object_.detected and object_.onSight and object_.tracked: colors = [[1, 0, 0] for _ in range(12)] geometry.colors = o3d.utility.Vector3dVector(colors) - elif not object_.connected and not object_.onSight and object_.CPM: + elif object_.detected and not object_.onSight and object_.CPM: colors = [[1, 0.6, 0] for _ in range(12)] geometry.colors = o3d.utility.Vector3dVector(colors) - elif not object_.connected and not object_.onSight and object_.tracked: + elif object_.detected and not object_.onSight and object_.tracked: colors = [[0.7, 0, 0] for _ in range(12)] geometry.colors = o3d.utility.Vector3dVector(colors) else: continue else: geometry = object_.perception.o3d_bbx - if object_.connected: + if not object_.detected: geometry.color = (0, 1, 0) - elif not object_.connected and object_.onSight and object_.tracked: + elif object_.detected and object_.onSight and object_.tracked: geometry.color = (1, 0, 0) - elif not object_.connected and not object_.onSight and object_.CPM: + elif object_.detected and not object_.onSight and object_.CPM: geometry.color = (1, 0.6, 0) - elif not object_.connected and object_.tracked: + elif object_.detected and object_.tracked: geometry.color = (0.5, 0, 0) else: continue - vis.add_geometry(geometry) + LDM_geometries.append(geometry) # vis.add_geometry(test_rotation()) for key, object_list in groundTruth.items(): # we only draw vehicles for now - if key != 'vehicles': + if key != 'vehicles' and key != 'VRU': continue for object_ in object_list: geometry = object_.o3d_bbx @@ -387,15 +394,28 @@ def o3d_camera_lidar_fusion(objects, dtype=np.int), axis=0)[0][0] y_common = mode(np.array(np.abs(select_points[:, 1]), dtype=np.int), axis=0)[0][0] - points_inlier = (np.abs(select_points[:, 0]) > x_common - 3) & \ - 
(np.abs(select_points[:, 0]) < x_common + 3) & \ - (np.abs(select_points[:, 1]) > y_common - 3) & \ - (np.abs(select_points[:, 1]) < y_common + 3) + points_inlier = (np.abs(select_points[:, 0]) > x_common - 5) & \ + (np.abs(select_points[:, 0]) < x_common + 5) & \ + (np.abs(select_points[:, 1]) > y_common - 5) & \ + (np.abs(select_points[:, 1]) < y_common + 5) select_points = select_points[points_inlier] - if select_points.shape[0] < 4: + min_points_required = { # Map label to minimum required points + 2: 4, # vehicle + 5: 4, # tuck + 7: 4, # bus + 0: 4, # pedestrian + 1: 4, # bicycle + 3: 4, # motorcycle + } + + if select_points.shape[0] < min_points_required.get(label, 0): continue + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(select_points) + labels = np.array(pcd.cluster_dbscan(eps=0.7, min_points=min_points_required.get(label, 0), print_progress=False)) + # to visualize 3d lidar points in o3d visualizer, we need to # revert the x coordinates select_points[:, :1] = -select_points[:, :1] @@ -441,6 +461,257 @@ def o3d_camera_lidar_fusion(objects, else: objects['vehicles'] = [obstacle_vehicle] # todo: refine the category + elif label in (0, 1, 3): + obstacle_VRU = ObstacleVRU(corner, aabb, confidence=confidence) + if label == 0: + obstacle_VRU.itsType = 'pedestrian' + elif label == 1: + obstacle_VRU.itsType = 'bicycle' + elif label == 3: + obstacle_VRU.itsType = 'motorcycle' + if obb is not None: + obstacle_VRU.o3d_obb = obb + obstacle_VRU.bounding_box.extent.x = obb.extent[0] / 2 + obstacle_VRU.bounding_box.extent.y = obb.extent[1] / 2 + yaw = ego_pos.rotation.yaw - np.degrees(np.arctan2(np.array(obb.R)[1, 0], np.array(obb.R)[0, 0])) + # if label == 0: + # obstacle_VRU.itsType = 'pedestrian' + # elif label == 1: + # obstacle_VRU.itsType = 'bicycle' + # elif label == 3: + # obstacle_VRU.itsType = 'motorcycle' + + obstacle_VRU.yaw = yaw + if 'VRU' in objects: + objects['VRU'].append(obstacle_VRU) + else: + objects['VRU'] = [obstacle_VRU] + # we regard or other obstacle rather than vehicle as static class + else: + static_obstacle = StaticObstacle(corner, aabb) + if 'static' in objects: + objects['static'].append(static_obstacle) + else: + objects['static'] = [static_obstacle] + + return objects + +def o3d_camera_lidar_fusion2(objects, + yolo_bbx, + lidar_3d, + projected_lidar, + lidar_sensor, + ego_pos = None): + + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(lidar_3d[:, :3]) + labels = np.array(pcd.cluster_dbscan(eps=0.7, min_points=10, print_progress=False)) + max_label = labels.max() + bounding_boxes = [] + for i in range(max_label + 1): + indices = np.where(labels == i)[0] + cluster_points = np.asarray(pcd.points)[indices] + if len(cluster_points) > 4: + cluster_pcd = o3d.geometry.PointCloud() + cluster_pcd.points = o3d.utility.Vector3dVector(cluster_points) + aabb = cluster_pcd.get_axis_aligned_bounding_box() + obb = None + try: + obb = cluster_pcd.get_oriented_bounding_box() + obb.color = (0, 1, 0) + except RuntimeError as e: + # print("Unable to compute the oriented bounding box:", e) + pass + # get the eight corner of the bounding boxes. 
+ corner = np.asarray(aabb.get_box_points()) + # convert back to unreal coordinate + corner[:, :1] = -corner[:, :1] + corner = corner.transpose() + # extend (3, 8) to (4, 8) for homogeneous transformation + corner = np.r_[corner, [np.ones(corner.shape[1])]] + # project to world reference + corner = st.sensor_to_world(corner, lidar_sensor.get_transform()) + corner = corner.transpose()[:, :3] + + obstacle_vehicle = ObstacleVehicle(corner, aabb, confidence=0.9) + if obb is not None: + obstacle_vehicle.o3d_obb = obb + obstacle_vehicle.bounding_box.extent.x = obb.extent[0]/2 + obstacle_vehicle.bounding_box.extent.y = obb.extent[1]/2 + yaw = ego_pos.rotation.yaw - np.degrees(np.arctan2(np.array(obb.R)[1, 0], np.array(obb.R)[0, 0])) + obstacle_vehicle.yaw = yaw + if 'vehicles' in objects: + objects['vehicles'].append(obstacle_vehicle) + else: + objects['vehicles'] = [obstacle_vehicle] + + return objects + +def o3d_camera_lidar_fusion3(objects, + yolo_bbx, + lidar_3d, + projected_lidar, + lidar_sensor, + ego_pos = None): + """ + Utilize the 3D lidar points to extend the 2D bounding box + from camera to 3D bounding box under world coordinates. + + Parameters + ---------- + objects : dict + The dictionary contains all object detection results. + + yolo_bbx : torch.Tensor + Object detection bounding box at current photo from yolov5, + shape (n, 6)->(n, [x1, y1, x2, y2, confidence, label]) + + lidar_3d : np.ndarray + Raw 3D lidar points in lidar coordinate system. + + projected_lidar : np.ndarray + 3D lidar points projected to the camera space. + + lidar_sensor : carla.sensor + The lidar sensor. + + ego_pos : carla.transform + The ego vehicle's transform. + Returns + ------- + objects : dict + The updated object dictionary that contains 3d bounding boxes. + """ + + # convert torch tensor to numpy array first + if yolo_bbx.is_cuda: + yolo_bbx = yolo_bbx.cpu().detach().numpy() + else: + yolo_bbx = yolo_bbx.detach().numpy() + + for i in range(yolo_bbx.shape[0]): + detection = yolo_bbx[i] + # 2d bbx coordinates + x1, y1, x2, y2 = int(detection[0]), int(detection[1]), \ + int(detection[2]), int(detection[3]) + label = int(detection[5]) + confidence = float(detection[4]) + + # choose the lidar points in the 2d yolo bounding box + points_in_bbx = \ + (projected_lidar[:, 0] > x1) & (projected_lidar[:, 0] < x2) & \ + (projected_lidar[:, 1] > y1) & (projected_lidar[:, 1] < y2) & \ + (projected_lidar[:, 2] > 0.0) + # ignore intensity channel + select_points = lidar_3d[points_in_bbx][:, :-1] + + if select_points.shape[0] == 0: + continue + + min_points_required = { # Map label to minimum required points + 2: 4, # car + 5: 4, # bus + 7: 4, # truck + 0: 4, # pedestrian + 1: 4, # bicycle + 3: 4, # motorcycle + } + + if select_points.shape[0] < min_points_required.get(label, 0): + continue + + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(select_points) + labels = np.array(pcd.cluster_dbscan(eps=0.7, min_points=3, print_progress=False)) + + if label in [1, 3] and (np.unique(labels) != -1).sum() > 1: + # Combine all cluster points if it is a bicycle or motorcycle + # to also include the rider + final_object_points = select_points[labels != -1] + + else: + unique_labels, counts = np.unique(labels[labels != -1], return_counts=True) + if len(unique_labels) == 0: + continue + + main_cluster_label = unique_labels[counts.argmax()] + main_cluster_indices = np.where(labels == main_cluster_label)[0] + final_object_points = select_points[main_cluster_indices] + + + if final_object_points.shape[0] < 4: + continue
+ + # to visualize 3d lidar points in o3d visualizer, we need to + # revert the x coordinates + final_object_points[:, :1] = -final_object_points[:, :1] + + # create o3d.PointCloud object + o3d_pointcloud = o3d.geometry.PointCloud() + o3d_pointcloud.points = o3d.utility.Vector3dVector(final_object_points) + o3d_pointcloud.paint_uniform_color([0, 1, 1]) + # add o3d bounding box + aabb = o3d_pointcloud.get_axis_aligned_bounding_box() + aabb.color = (1, 0, 0) + + obb = None + if np.asarray(o3d_pointcloud.points).shape[0] >= 4: + try: + obb = o3d_pointcloud.get_oriented_bounding_box() + obb.color = (0, 1, 0) + except RuntimeError as e: + print("Unable to compute the oriented bounding box:", e) + pass + + # get the eight corner of the bounding boxes. + corner = np.asarray(aabb.get_box_points()) + # covert back to unreal coordinate + corner[:, :1] = -corner[:, :1] + corner = corner.transpose() + # extend (3, 8) to (4, 8) for homogenous transformation + corner = np.r_[corner, [np.ones(corner.shape[1])]] + # project to world reference + corner = st.sensor_to_world(corner, lidar_sensor.get_transform()) + corner = corner.transpose()[:, :3] + + if is_vehicle_cococlass(label): + obstacle_vehicle = ObstacleVehicle(corner, obb, confidence=confidence) + if obb is not None: + obstacle_vehicle.o3d_obb = obb + obstacle_vehicle.bounding_box.extent.x = obb.extent[0]/2 + obstacle_vehicle.bounding_box.extent.y = obb.extent[1]/2 + yaw = ego_pos.rotation.yaw - np.degrees(np.arctan2(np.array(obb.R)[1, 0], np.array(obb.R)[0, 0])) + obstacle_vehicle.yaw = yaw + if 'vehicles' in objects: + objects['vehicles'].append(obstacle_vehicle) + else: + objects['vehicles'] = [obstacle_vehicle] + # todo: refine the category + elif label in (0, 1, 3): + obstacle_VRU = ObstacleVRU(corner, obb, confidence=confidence) + if label == 0: + obstacle_VRU.itsType = 'pedestrian' + elif label == 1: + obstacle_VRU.itsType = 'bicycle' + elif label == 3: + obstacle_VRU.itsType = 'motorcycle' + if obb is not None: + obstacle_VRU.o3d_obb = obb + obstacle_VRU.bounding_box.extent.x = obb.extent[0] / 2 + obstacle_VRU.bounding_box.extent.y = obb.extent[1] / 2 + yaw = ego_pos.rotation.yaw - np.degrees(np.arctan2(np.array(obb.R)[1, 0], np.array(obb.R)[0, 0])) + # if label == 0: + # obstacle_VRU.itsType = 'pedestrian' + # elif label == 1: + # obstacle_VRU.itsType = 'bicycle' + # elif label == 3: + # obstacle_VRU.itsType = 'motorcycle' + + obstacle_VRU.yaw = yaw + if 'VRU' in objects: + objects['VRU'].append(obstacle_VRU) + else: + objects['VRU'] = [obstacle_VRU] # we regard or other obstacle rather than vehicle as static class else: static_obstacle = StaticObstacle(corner, aabb) diff --git a/opencda/core/sensing/perception/obstacle_pedestrian.py b/opencda/core/sensing/perception/obstacle_pedestrian.py new file mode 100644 index 000000000..dfe7d48bc --- /dev/null +++ b/opencda/core/sensing/perception/obstacle_pedestrian.py @@ -0,0 +1,235 @@ +# -*- coding: utf-8 -*- +""" +Obstacle pedestrian class to save object detection. +""" + +# Author: Runsheng Xu +# License: TDG-Attribution-NonCommercial-NoDistrib +import sys + +import carla +import numpy as np +import open3d as o3d + +import opencda.core.sensing.perception.sensor_transformation as st +from opencda.core.common.misc import get_speed_sumo + + +class BoundingBox(object): + """ + Bounding box class for obstacle pedestrian. + + Params: + -corners : nd.nparray + Eight corners of the bounding box. (shape:(8, 3)) + Attributes: + -location : carla.location + The location of the object. 
-extent : carla.Vector3D + The extent of the object. + """ + + def __init__(self, corners): + center_x = np.mean(corners[:, 0]) + center_y = np.mean(corners[:, 1]) + center_z = np.mean(corners[:, 2]) + + extent_x = (np.max(corners[:, 0]) - np.min(corners[:, 0])) / 2 + extent_y = (np.max(corners[:, 1]) - np.min(corners[:, 1])) / 2 + extent_z = (np.max(corners[:, 2]) - np.min(corners[:, 2])) / 2 + + self.location = carla.Location(x=center_x, y=center_y, z=center_z) + self.extent = carla.Vector3D(x=extent_x, y=extent_y, z=extent_z) + + +class ObstacleVRU(object): + """ + A class for obstacle VRUs (pedestrians, bicycles and motorcycles). + The attributes are designed to match the carla.Walker class. + + Parameters + ---------- + corners : np.ndarray + Eight corners of the bounding box. shape:(8, 3). + + o3d_bbx : open3d.AxisAlignedBoundingBox + The bounding box object in Open3d. This is mainly used for + visualization + + VRU : carla.Walker + The carla.Walker object. + + lidar : carla.sensor.lidar + The lidar sensor. + + sumo2carla_ids : dict + Sumo to carla mapping dictionary, this is used only when co-simulation + is activated. We need this since the speed info of vehicles that + are controlled by sumo can not be read from carla server. We will + need this dict to read vehicle speed from sumo api--traci. + + Attributes + ---------- + bounding_box : BoundingBox + Bounding box of the object VRU. + + location : carla.location + Location of the object. + + carla_id : int + The obstacle pedestrian's id. It should be the same as the + corresponding carla.Walker's id. If no carla walker is + matched with the obstacle pedestrian, it should be -1. + """ + + def __init__(self, corners, o3d_bbx, + VRU=None, lidar=None, sumo2carla_ids=None, confidence=0.0, itsType = 'pedestrian'): + + self.o3d_obb = None + if not VRU: + self.bounding_box = BoundingBox(corners) + self.location = self.bounding_box.location + # todo: next version will add rotation estimation + self.transform = None + self.o3d_bbx = o3d_bbx + self.carla_id = -1 + self.velocity = carla.Vector3D(0.0, 0.0, 0.0) + self.yaw = 0.0 + self.itsType = itsType + self.confidence = confidence + else: + if sumo2carla_ids is None: + sumo2carla_ids = dict() + self.set_VRU(VRU, lidar, sumo2carla_ids) + self.confidence = 100.0 # They are ground truth perceptions + + def get_transform(self): + """ + Return the transform of the object VRU. + """ + return self.transform + + def get_location(self): + """ + Return the location of the object VRU. + """ + return self.location + + def get_velocity(self): + """ + Return the velocity of the object VRU. + """ + return self.velocity + + def set_carla_id(self, id): + """ + Set carla id according to the matched carla.Walker. + + Parameters + ---------- + id : int + The id from the carla.Walker. + """ + self.carla_id = id + + def set_velocity(self, velocity): + """ + Set the velocity of the VRU. + + Parameters + ---------- + velocity : carla.Vector3D + The target velocity in 3d vector format. + + """ + self.velocity = velocity + + def set_VRU(self, VRU, lidar, sumo2carla_ids): + """ + Assign the attributes from carla.Walker to ObstacleVRU. + + Parameters + ---------- + VRU : carla.Walker + The carla.Walker object. + + sumo2carla_ids : dict + Sumo to carla mapping dictionary, this is used only when + co-simulation is activated. We need this since the speed info of + vehicles that are controlled by sumo can not be read from carla + server. We will need this dict to read vehicle speed + from sumo api--traci.
+ """ + self.location = VRU.get_location() + self.transform = VRU.get_transform() + self.bounding_box = VRU.bounding_box + self.carla_id = VRU.id + self.type_id = VRU.type_id + self.set_velocity(VRU.get_velocity()) + + # the vehicle controlled by sumo has speed 0 in carla, + # thus we need to retrieve the correct number from sumo + if len(sumo2carla_ids) > 0: + sumo_speed = get_speed_sumo(sumo2carla_ids, self.carla_id) + if sumo_speed > 0: + # todo: consider the yaw angle in the future + speed_vector = carla.Vector3D(sumo_speed, 0, 0) + self.set_velocity(speed_vector) + + yaw = np.deg2rad(self.transform.rotation.yaw) + c, s = np.cos(yaw), np.sin(yaw) + R = np.array([[c, -s], [s, c]]) + local_corners = np.array([ + [-self.bounding_box.extent.x, -self.bounding_box.extent.y], + [self.bounding_box.extent.x, -self.bounding_box.extent.y], + [self.bounding_box.extent.x, self.bounding_box.extent.y], + [-self.bounding_box.extent.x, self.bounding_box.extent.y] + ]) + world_corners = np.dot(R, local_corners.T).T + np.array([self.location.x, self.location.y]) + + + # find the min and max boundary + # min_boundary = np.array([self.location.x - self.bounding_box.extent.x, + # self.location.y - self.bounding_box.extent.y, + # self.location.z + self.bounding_box.location.z + # - self.bounding_box.extent.z, + # 1]) + # max_boundary = np.array([self.location.x + self.bounding_box.extent.x, + # self.location.y + self.bounding_box.extent.y, + # self.location.z + self.bounding_box.location.z + # + self.bounding_box.extent.z, + # 1]) + min_boundary = np.array([np.min(world_corners, axis=0)[0], + np.min(world_corners, axis=0)[1], + self.location.z + self.bounding_box.location.z + - self.bounding_box.extent.z, + 1]) + max_boundary = np.array([np.max(world_corners, axis=0)[0], + np.max(world_corners, axis=0)[1], + self.location.z + self.bounding_box.location.z + + self.bounding_box.extent.z, + 1]) + min_boundary = min_boundary.reshape((4, 1)) + max_boundary = max_boundary.reshape((4, 1)) + stack_boundary = np.hstack((min_boundary, max_boundary)) + + if lidar is None: + return + # the boundary coord at the lidar sensor space + stack_boundary_sensor_cords = st.world_to_sensor(stack_boundary, + lidar.get_transform()) + # convert unreal space to o3d space + stack_boundary_sensor_cords[:1, :] = - \ + stack_boundary_sensor_cords[:1, :] + # (4,2) -> (3, 2) + stack_boundary_sensor_cords = stack_boundary_sensor_cords[:-1, :] + + min_boundary_sensor = np.min(stack_boundary_sensor_cords, axis=1) + max_boundary_sensor = np.max(stack_boundary_sensor_cords, axis=1) + + aabb = \ + o3d.geometry.AxisAlignedBoundingBox(min_bound=min_boundary_sensor, + max_bound=max_boundary_sensor) + aabb.color = (1, 0, 0) + self.o3d_bbx = aabb + diff --git a/opencda/core/sensing/perception/obstacle_vehicle.py b/opencda/core/sensing/perception/obstacle_vehicle.py index 29ed51e6a..63caea156 100644 --- a/opencda/core/sensing/perception/obstacle_vehicle.py +++ b/opencda/core/sensing/perception/obstacle_vehicle.py @@ -24,8 +24,18 @@ def is_vehicle_cococlass(label): Returns: -is_vehicle(bool): Whether this label belongs to the vehicle class + + 0 - pedestrian + 1 - bicycle + 2 - car + 3 - motorcycle + 4 - airplane + 5 - bus + 6 - train + 7 - truck """ - vehicle_class_array = np.array([1, 2, 3, 5, 7], dtype=np.int) + + vehicle_class_array = np.array([2, 5, 7], dtype=np.int) return True if 0 in (label - vehicle_class_array) else False class BoundingBox(object): @@ -112,6 +122,7 @@ def __init__(self, corners, o3d_bbx, self.velocity = carla.Vector3D(0.0, 
0.0, 0.0) self.yaw = 0.0 self.confidence = confidence + self.itsType = 'vehicle' else: if sumo2carla_ids is None: sumo2carla_ids = dict() diff --git a/opencda/core/sensing/perception/perception_manager.py b/opencda/core/sensing/perception/perception_manager.py index a2f3b2d1b..cf810a5fa 100644 --- a/opencda/core/sensing/perception/perception_manager.py +++ b/opencda/core/sensing/perception/perception_manager.py @@ -14,18 +14,27 @@ import cv2 import numpy as np import open3d as o3d +import os +import csv import random +import threading +import pandas as pd import opencda.core.sensing.perception.sensor_transformation as st from opencda.core.common.misc import \ cal_distance_angle, get_speed, get_speed_sumo from opencda.core.sensing.perception.obstacle_vehicle import \ ObstacleVehicle +from opencda.core.sensing.perception.obstacle_pedestrian import \ + ObstacleVRU from opencda.core.sensing.perception.static_obstacle import TrafficLight from opencda.core.sensing.perception.o3d_lidar_libs import \ o3d_visualizer_init, o3d_pointcloud_encode, o3d_visualizer_show, \ - o3d_camera_lidar_fusion + o3d_camera_lidar_fusion , o3d_camera_lidar_fusion2, o3d_camera_lidar_fusion3 from sklearn.cluster import DBSCAN +from scipy.optimize import linear_sum_assignment +from scipy.spatial.distance import euclidean +# from opencda.customize.v2x.LDMutils import compute_IoU class CameraSensor: """ @@ -201,6 +210,9 @@ def __init__(self, vehicle, world, config_yaml, global_position): # open3d point cloud object self.o3d_pointcloud = o3d.geometry.PointCloud() + # Create a directory to store the output files + # self.output_path = "lidar_output" + # os.makedirs(self.output_path, exist_ok=True) weak_self = weakref.ref(self) self.sensor.listen( lambda event: LidarSensor._on_data_event( @@ -218,6 +230,16 @@ def _on_data_event(weak_self, event): # (x, y, z, intensity) data = np.reshape(data, (int(data.shape[0] / 4), 4)) + # 2. Update the Open3D point cloud object + # We only need the (x, y, z) coordinates for the geometry + # points = data[:, :3] + # self.o3d_pointcloud.points = o3d.utility.Vector3dVector(points) + # + # # 3. Save the point cloud to a .pcd file + # # Use the frame number to create a unique filename for each frame + # filename = os.path.join(self.output_path, f"{event.frame:06d}.pcd") + # o3d.io.write_point_cloud(filename, self.o3d_pointcloud) + self.data = data self.frame = event.frame self.timestamp = event.timestamp @@ -535,6 +557,33 @@ def __init__(self, vehicle, config_yaml, cav_world, self.traffic_thresh = config_yaml['traffic_light_thresh'] \ if 'traffic_light_thresh' in config_yaml else 50 + # --- CSV Configuration --- + self.EXCLUDED_PED_COUNT_CSV_FILE = "excluded_pedestrian_counts.csv" + self.EXCLUDED_PED_COUNT_HEADERS = [ + "timestamp", + "perceiver_id", + "excluded_pedestrian_count" + ] + self.confidence_csv_file = "Confidence_level_all_PO.csv" + self.confidence_csv_header = [ + "timestamp", + "confidence", + "ID" + ] + # self.detection_csv_file = "Detection_parameters.csv" + # self.detection_csv_header = [ + # "true_positives", + # "false_negatives", + # "false_positives" + # ] + + # --- Threading Lock for CSV writing --- + # If you have other CSV writing operations, ensure locks are used appropriately + # to prevent deadlocks or if a single lock can manage all CSV access. 
+ # For this specific file, we use this lock: + self.excluded_ped_count_csv_lock = threading.Lock() + self.confidence_csv_lock = threading.Lock() + def dist(self, a): """ A fast method to retrieve the obstacle distance the ego @@ -555,6 +604,7 @@ def dist(self, a): def detect(self, ego_pos): """ Detect surrounding objects. Currently only vehicle detection supported. + Implementation for pedestrians detection ongoing. Parameters ---------- @@ -570,7 +620,8 @@ def detect(self, ego_pos): self.ego_pos = ego_pos objects = {'vehicles': [], - 'traffic_lights': []} + 'traffic_lights': [], + 'VRU': []} if not self.activate: objects = self.deactivate_mode(objects) @@ -648,6 +699,8 @@ def radar_detect(self, objects): vehicle_list = [v for v in vehicle_list if self.dist(v) < 50 and v.id != self.id] + ped_list = world.get_actors() + ped_list = [p for p in ped_list if self.dist(p) < 50 and p.id != self.id] # Step 5: Create and Draw Bounding Boxes for label in unique_labels: @@ -684,10 +737,16 @@ def radar_detect(self, objects): bbox_corners[:, 1] += self.radar.sensor.get_transform().location.y bbox_corners[:, 2] += self.radar.sensor.get_transform().location.z + + obstacle_vehicle = ObstacleVehicle(bbox_corners, o3d_bbx, confidence=0.71) obstacle_vehicle.set_velocity( carla.Vector3D(self.vehicle.get_velocity().x + cluster_points.mean(axis=0)[3], 0, 0)) + # obstacle_VRU = ObstacleVRU(bbox_corners, o3d_bbx, confidence=0.71) + # obstacle_VRU.set_velocity( + # carla.Vector3D(self.vehicle.get_velocity().x + cluster_points.mean(axis=0)[3], 0, 0)) + for v in vehicle_list: loc = v.get_location() obstacle_loc = obstacle_vehicle.get_location() @@ -695,8 +754,12 @@ def radar_detect(self, objects): abs(loc.y - obstacle_loc.y) <= 3.0: obstacle_vehicle.carla_id = v.id + # for p in in ped_list: + # p_loc = p.get_location() + objects['vehicles'].append(obstacle_vehicle) + return objects @@ -751,7 +814,7 @@ def activate_mode(self, objects): rgb_draw_images.append(rgb_image) # camera lidar fusion - objects = o3d_camera_lidar_fusion( + objects = o3d_camera_lidar_fusion3( objects, yolo_detection.xyxy[i], data_copy, @@ -759,11 +822,20 @@ def activate_mode(self, objects): self.lidar.sensor, self.ego_pos) + # objects = o3d_camera_lidar_fusion2( + # objects, + # yolo_detection.xyxy[i], + # data_copy, + # projected_lidar, + # self.lidar.sensor, + # self.ego_pos) + # calculate the speed. current we retrieve from the server # directly. 
+ # this function is also used to set the id self.speed_retrieve(objects) - self.radar_detect(objects) + #self.radar_detect(objects) fusion_time = time.time_ns() @@ -774,30 +846,45 @@ def activate_mode(self, objects): break rgb_image = self.ml_manager.draw_2d_box( yolo_detection, rgb_image, i) - rgb_image = cv2.resize(rgb_image, (0, 0), fx=0.4, fy=0.4) + rgb_image = cv2.resize(rgb_image, (0, 0), fx=0.8, fy=0.8) cv2.imshow( '%s-th camera of actor %d, perception activated' % (str(i), self.id), rgb_image) cv2.waitKey(1) - objects['vehicles'] = [item for item in objects['vehicles'] if item.confidence >= 0.7] - duplicate_indices = set() - # Iterate through the objects to check for duplicates - for i in range(len(objects['vehicles'])): - for j in range(i + 1, len(objects['vehicles'])): - dist = math.sqrt(pow(objects['vehicles'][i].location.x - objects['vehicles'][j].location.x, 2) - + pow(objects['vehicles'][i].location.y - objects['vehicles'][j].location.y, 2)) - if dist < 3 or objects['vehicles'][i].carla_id == objects['vehicles'][j].carla_id: - # if (objects['vehicles'][i].bounding_box.extent.x*objects['vehicles'][i].bounding_box.extent.y) > \ - # (objects['vehicles'][j].bounding_box.extent.x * objects['vehicles'][j].bounding_box.extent.y): - if objects['vehicles'][i].confidence > objects['vehicles'][j].confidence: - duplicate_indices.add(j) - else: - duplicate_indices.add(i) - - # Remove duplicate objects from the list - for index in sorted(duplicate_indices, reverse=True): - objects['vehicles'].pop(index) + for key in objects: + if key == 'static': + continue + if key == "vehicles": + objects[key] = [item for item in objects[key] if item.confidence >= 0.7] + if key == "VRU": + current_list = objects[key] + vrus_to_be_excluded = [ + item for item in current_list if item.confidence < 0.4 and item.carla_id != -1] + excluded_count = len(vrus_to_be_excluded) + self.log_excluded_pedestrian_count(self.id, excluded_count) + for item in current_list: + self.log_confidence_level(item.confidence, item.carla_id) + + objects[key] = [item for item in objects[key] if item.confidence >= 0.4] + + duplicate_indices = set() + # Iterate through the objects to check for duplicates + for i in range(len(objects[key])): + for j in range(i + 1, len(objects[key])): + dist = math.sqrt(pow(objects[key][i].location.x - objects[key][j].location.x, 2) + + pow(objects[key][i].location.y - objects[key][j].location.y, 2)) + if (dist < 2 and key == 'vehicles') or objects[key][i].carla_id == objects[key][j].carla_id: + # if (objects['vehicles'][i].bounding_box.extent.x*objects['vehicles'][i].bounding_box.extent.y) > \ + # (objects['vehicles'][j].bounding_box.extent.x * objects['vehicles'][j].bounding_box.extent.y): + if objects[key][i].confidence > objects[key][j].confidence: + duplicate_indices.add(j) + else: + duplicate_indices.add(i) + + # Remove duplicate objects from the list + for index in sorted(duplicate_indices, reverse=True): + objects[key].pop(index) if self.lidar_visualize: while self.lidar.data is None: @@ -918,14 +1005,18 @@ def getGTobjects(self): world = self.carla_world vehicle_list = world.get_actors().filter("*vehicle*") + VRU_list = world.get_actors().filter("*pedestrian*") # todo: hard coded thresh = 75 if self.ego_pos: vehicle_list = [v for v in vehicle_list if self.dist(v) < thresh and v.id != self.id] + VRU_list = [p for p in VRU_list if self.dist(p) < thresh and + p.id != self.id] else: vehicle_list = [v for v in vehicle_list if v.id != self.id] + VRU_list = [p for p in VRU_list if p.id != self.id] # 
convert carla.Vehicle to opencda.ObstacleVehicle if lidar # visualization is required. @@ -937,6 +1028,13 @@ def getGTobjects(self): v, self.lidar.sensor, None) for v in vehicle_list] + VRU_list = [ + ObstacleVRU( + None, + None, + p, + self.lidar.sensor, + None) for p in VRU_list] else: vehicle_list = [ ObstacleVehicle( @@ -945,8 +1043,15 @@ def getGTobjects(self): v, None, self.cav_world.sumo2carla_ids) for v in vehicle_list] + VRU_list = [ + ObstacleVRU( + None, + None, + p, + None, + self.cav_world.sumo2carla_ids) for p in VRU_list] - objects = {'vehicles': vehicle_list} + objects = {'vehicles': vehicle_list, 'VRU': VRU_list} # add traffic light objects = self.retrieve_traffic_lights(objects) @@ -1038,6 +1143,8 @@ def speed_retrieve(self, objects): """ if 'vehicles' not in objects: return + if 'VRU' not in objects: + return world = self.carla_world vehicle_list = world.get_actors().filter("*vehicle*") @@ -1046,6 +1153,7 @@ def speed_retrieve(self, objects): # todo: consider the minimum distance to be safer in next version for v in vehicle_list: + loc = v.get_location() for obstacle_vehicle in objects['vehicles']: obstacle_speed = get_speed(obstacle_vehicle) @@ -1058,7 +1166,7 @@ def speed_retrieve(self, objects): abs(loc.y - obstacle_loc.y) <= 3.0: obstacle_vehicle.set_velocity(v.get_velocity()) - # the case where the obstacle vehicle is controled by + # the case where the obstacle vehicle is controlled by # sumo if self.cav_world.sumo2carla_ids: sumo_speed = \ @@ -1071,6 +1179,154 @@ def speed_retrieve(self, objects): obstacle_vehicle.set_carla_id(v.id) + allactor = world.get_actors() + VRU_list = [] + blueprint_library = world.get_blueprint_library() + for actor in allactor: + try: + bp = blueprint_library.find(actor.type_id) + except: + continue + # actor = world.get_actor(actor.id) + # actor.attributes.object_type + # actor type id has no motorcycle or bicycle + # todo: check for blueprint attributes + if bp.has_attribute("number_of_wheels"): + if actor.attributes['number_of_wheels'] == '2': + VRU_list.append(actor) + elif 'pedestrian' in actor.type_id: + VRU_list.append(actor) + VRU_list = [p for p in VRU_list if self.dist(p) < 40 and + p.id != self.id] + + # -----Hungarian algorithm----- + if objects['VRU']: + MAX_DISTANCE_THRESHOLD = 1 + MAX_IOU_THRESHOLD = 0.3 + + num_gt = len(VRU_list) + num_perceived = len(objects['VRU']) + # Create an N_gt x N_perceived cost matrix + # Initialize with a very high cost for impossible matches + cost_matrix = np.full((num_gt, num_perceived), 1000.0) + # cost_matrix_IoU = np.full((num_gt, num_perceived), 0.0) + matched_ids_this_cycle = set() + for i, p in enumerate(VRU_list): + gt_loc = p.get_location() + + + for j, obstacle_ped in enumerate(objects['VRU']): + obstacle_loc = obstacle_ped.get_location() + compatible = False + if obstacle_ped.itsType == 'pedestrian' and 'pedestrian' in p.type_id: + compatible = True + elif obstacle_ped.itsType != 'pedestrian' and 'pedestrian' not in p.type_id: + if p.attributes["number_of_wheels"] == '2': + compatible = True + + if not compatible: + continue + + # squared euclidean distance + dx = gt_loc.x - obstacle_loc.x + dy = gt_loc.y - obstacle_loc.y + distance_sq = dx * dx + dy * dy + distance = np.sqrt(distance_sq) + current_cost = distance + + # check proximity + if distance <= MAX_DISTANCE_THRESHOLD * 3: + cost_matrix[i, j] = current_cost + + # Todo: consider also IoU for the cost matrix (and mAP calculation) + #IoU cost matrix + # bbx = p.bounding_box + # width = bbx.extent.x*2 + # length = 
bbx.extent.y*2 + # heigth = bbx.extent.z*2 + # yaw = p.get_transform().rotation.yaw + # lidar_transform = self.lidar.sensor.get_transform() + # gt_o3d_aabb, gt_obb = self.get_o3d_bounding_boxes(gt_loc.x, gt_loc.y,0, width, length, heigth, + # yaw, lidar_transform) + # detected_o3d_aabb, detected_obb = self.get_o3d_bounding_boxes(obstacle_loc.x, obstacle_loc.y, 0, + # obstacle_ped.bounding_box.extent.x, + # obstacle_ped.bounding_box.extent.y, + # obstacle_ped.bounding_box.extent.z, + # obstacle_ped.yaw,lidar_transform) + # IoU = self.compute_obb_iou(gt_obb, detected_obb) + # cost_matrix_IoU[i, j] = IoU + + + # Perform the optimal assignment using the Hungarian algorithm + row_ind, col_ind = linear_sum_assignment(cost_matrix) + print(f"[{self.vehicle.get_world().get_snapshot().elapsed_seconds}] ") + # print("Cost matrix:\n", cost_matrix[:, :]) + # print("Cost matrix IoU:\n", cost_matrix_IoU[:, :]) + true_positives = 0 + false_positives = 0 + false_negatives = 0 + matched_gt_ids_this_cycle = set() + matched_perceived_ids_this_cycle = set() + + # row_ids contains indices of gt objects + # col_ind contains indices of perceived matched objects + for gt_idx, perceived_idx in zip(row_ind, col_ind): + cost = cost_matrix[gt_idx, perceived_idx] + + is_valid_match = False + if cost != 1000.0 and cost <= MAX_DISTANCE_THRESHOLD: # Check distance threshold + perceived_obj = objects['VRU'][perceived_idx] + gt_obj = VRU_list[gt_idx] + is_valid_match = True + # print(f"ID: {gt_obj.id} - cost = {cost} - confidence = {perceived_obj.confidence}") + + if is_valid_match: + true_positives += 1 + gt_obj = VRU_list[gt_idx] + best_matching_obj = objects['VRU'][perceived_idx] + + # Update the perceived object with ground truth information + actor = allactor.find(gt_obj.id) + if actor: + aspeed = actor.get_velocity() + best_matching_obj.velocity.x = aspeed.x + best_matching_obj.velocity.y = aspeed.y + + atransform = actor.get_transform() + yaw = atransform.rotation.yaw + # Log heading difference if needed + self.log_heading_to_excel(time, gt_obj.id, yaw, best_matching_obj.yaw) + + best_matching_obj.yaw = yaw + + best_matching_obj.set_carla_id(gt_obj.id) # Assign the Carla ID from GT for tracking + + matched_gt_ids_this_cycle.add(gt_obj.id) + matched_perceived_ids_this_cycle.add(best_matching_obj.carla_id) + + # Calculate False Negatives (GT objects not matched) + # extremely dependent on GT list + # todo: address false negatives - which GT I SHOULD be detecting? gt_list is not enough + for gt_obj in VRU_list: + if gt_obj.id not in matched_gt_ids_this_cycle: + false_negatives += 1 + + # Calculate False Positives (Perceived objects not matched) + for perceived_obj in objects['VRU']: + if perceived_obj.carla_id not in matched_perceived_ids_this_cycle: + if perceived_obj.carla_id == -1: + false_positives += 1 + else: # If it got a Carla ID but wasn't in our current matched list, something went wrong + # This assumes all matched perceived_obj will have their carla_id set + pass + + # self.log_detection_par(true_positives, false_negatives, false_positives) + # print(f"True Positives: {true_positives}") + # print(f"False Negatives: {false_negatives}") + # print(f"False Positives: {false_positives}") + + + def retrieve_traffic_lights(self, objects): """ Retrieve the traffic lights nearby from the server directly. 
@@ -1153,3 +1409,228 @@ def destroy(self): if self.data_dump: self.semantic_lidar.sensor.destroy() + + # def get_o3d_bounding_boxes(self, x, y, z, extent_x, extent_y, extent_z, heading_deg, lidar_transform): + # """ + # Creates an Open3D Oriented Bounding Box (OBB) and its corresponding + # Axis-Aligned Bounding Box (AABB) in the sensor's coordinate frame + # by directly transforming the OBB object. + # + # Args: + # x, y, z (float): Center of the box in world coordinates. + # extent_x, extent_y, extent_z (float): The full size (length, width, height) of the box. + # heading_deg (float): The yaw/heading of the box in degrees. + # lidar_transform (carla.Transform): The transform of the sensor. + # + # Returns: + # tuple: (o3d.geometry.AxisAlignedBoundingBox, o3d.geometry.OrientedBoundingBox) + # """ + # # Use the input parameters to define the OBB directly. + # center_world = [x, y, z] + # extent_world = [extent_x, extent_y, extent_z] + # + # # Create the rotation matrix from the yaw angle + # yaw_rad = np.deg2rad(heading_deg) + # R = o3d.geometry.get_rotation_matrix_from_xyz((0, 0, yaw_rad)) + # + # # Create the Open3D OBB object in world coordinates + # obb_world = o3d.geometry.OrientedBoundingBox(center_world, R, extent_world) + # + # # 1. Get the 8 corners of the bounding box + # corner_points_world = np.asarray(obb_world.get_box_points()) # shape (8, 3) + # + # # 2. Transpose the array to have shape (3, 8) + # corner_points_world_transposed = corner_points_world.transpose() + # + # # 3. Add a row of ones to create homogeneous coordinates, shape (4, 8) + # homogeneous_points = np.vstack((corner_points_world_transposed, np.ones(8))) + # + # # Now you can use this as input for your world_to_sensor function + # sensor_cords = st.world_to_sensor(homogeneous_points, lidar_transform) + # + # sensor_cords[:1, :] = - sensor_cords[:1, :] + # sensor_cords = sensor_cords[:-1, :] + # sensor_cords = sensor_cords.transpose() + # + # obb_points = o3d.utility.Vector3dVector(sensor_cords) + # + # # 4. Now, create the AABB and visualization objects from the final, + # # correctly transformed OBB, which is the most robust method. + # aabb_sensor = o3d.geometry.AxisAlignedBoundingBox.create_from_points(obb_points) + # aabb_sensor.color = (0, 1, 0) # Green for AABB + # + # obb_sensor = o3d.geometry.OrientedBoundingBox.create_from_points(obb_points) + # obb2 = o3d.geometry.OrientedBoundingBox.create_from_axis_aligned_bounding_box(aabb_sensor) + # + # + # # This is an alternative to the line_set creation, but it is more efficient + # # line_set_sensor = o3d.geometry.LineSet.create_from_oriented_bounding_box(obb_sensor) + # # line_set_sensor.paint_uniform_color((1, 0, 0)) # Red for OBB outline + # + # # Returning the OBB is critical for your IoU calculation + # return aabb_sensor, obb_sensor + + # def compute_obb_iou(self, obb1, obb2): + # """ + # Computes the Intersection over Union (IoU) for two Open3D OrientedBoundingBoxes. + # + # This method works by converting the OBBs to meshes, calculating the + # volume of their intersection, and then applying the IoU formula. + # + # Note: Requires Open3D version >= 0.16.0 for mesh boolean operations. + # """ + # try: + # mesh1 = o3d.geometry.TriangleMesh.create_from_oriented_bounding_box(obb1) + # mesh2 = o3d.geometry.TriangleMesh.create_from_oriented_bounding_box(obb2) + # except AttributeError: + # # Fallback for older versions if needed, though this is less likely + # print("Warning: create_from_oriented_bounding_box not found. 
" + # "Check your Open3D version.") + # return 0.0 + # + # # Get the volumes of the original boxes + # volume1 = obb1.volume() + # volume2 = obb2.volume() + # + # # Calculate the intersection mesh + # # The intersection operation might fail for some geometries, so we use a try-except block + # try: + # intersection_mesh = mesh1.boolean_intersection(mesh2) + # intersection_volume = intersection_mesh.get_volume() + # except Exception as e: + # # If the boolean operation fails, it often means there's no intersection + # # or the intersection is a lower-dimensional shape (line/point). + # # print(f"Mesh intersection failed: {e}") + # intersection_volume = 0.0 + # + # # Calculate the union volume + # union_volume = volume1 + volume2 - intersection_volume + # + # # Compute the IoU + # if union_volume == 0: + # return 0.0 # Avoid division by zero + # else: + # return intersection_volume / union_volume + + def log_excluded_pedestrian_count(self, perceiver_id_val, count_of_excluded_peds): + """ + Logs the timestamp, perceiver ID, and the count of excluded pedestrians + to a CSV file in a thread-safe manner. + + Args: + perceiver_id_val: The ID of the perceiving entity (e.g., self.id). + count_of_excluded_peds: The number of pedestrians excluded in this step. + """ + # timestamp = datetime.datetime.now().isoformat() + timestamp = time.time_ns() + data_row = [timestamp, perceiver_id_val, count_of_excluded_peds] + + # Acquire the lock before performing file operations + with self.excluded_ped_count_csv_lock: + try: + file_exists = os.path.isfile(self.EXCLUDED_PED_COUNT_CSV_FILE) + # Check if file is empty (it might exist but have no content/headers) + is_empty_file = file_exists and os.path.getsize(self.EXCLUDED_PED_COUNT_CSV_FILE) == 0 + + with open(self.EXCLUDED_PED_COUNT_CSV_FILE, mode='a', newline='') as csvfile: + writer = csv.writer(csvfile) + if not file_exists or is_empty_file: + # Write header only if it's a new or empty file + writer.writerow(self.EXCLUDED_PED_COUNT_HEADERS) + writer.writerow(data_row) + except IOError as e: + print( + f"Thread ID {threading.get_ident()}: IOError writing to {self.EXCLUDED_PED_COUNT_CSV_FILE}. Reason: {e}") + except Exception as e: + print( + f"Thread ID {threading.get_ident()}: Unexpected error during CSV writing to {self.EXCLUDED_PED_COUNT_CSV_FILE}. Reason: {e}") + + def log_confidence_level(self, confidence, obj_id): + """ + Logs the timestamp, perceiver ID, and the confidence level + to a CSV file in a thread-safe manner. + + Args: + confidence: confidence level from YOLOv5 detection. + obj_id: id of the perceived object. + """ + # timestamp = datetime.datetime.now().isoformat() + timestamp = time.time_ns() + data_row = [timestamp, confidence, obj_id] + + # Acquire the lock before performing file operations + with self.confidence_csv_lock: + try: + file_exists = os.path.isfile(self.confidence_csv_file) + # Check if file is empty (it might exist but have no content/headers) + is_empty_file = file_exists and os.path.getsize(self.confidence_csv_file) == 0 + + with open(self.confidence_csv_file, mode='a', newline='') as csvfile: + writer = csv.writer(csvfile) + if not file_exists or is_empty_file: + # Write header only if it's a new or empty file + writer.writerow(self.confidence_csv_header) + writer.writerow(data_row) + except IOError as e: + print( + f"Thread ID {threading.get_ident()}: IOError writing to {self.confidence_csv_file}. 
Reason: {e}") + except Exception as e: + print( + f"Thread ID {threading.get_ident()}: Unexpected error during CSV writing to {self.confidence_csv_file}. Reason: {e}") + + def log_heading_to_excel(self, time, actor_id, ground_truth_heading, perceived_heading): + + header = ['timestamp','ground_truth_heading', 'perceived_heading'] + + df = pd.DataFrame([{ + 'timestamp': time.time_ns(), + 'ground_truth_heading': ground_truth_heading, + 'perceived_heading': perceived_heading + }]) + + + log_directory = "Heading_difference" + if not os.path.exists(log_directory): + os.makedirs(log_directory) + + file_name = os.path.join(log_directory, f"heading_{actor_id}.csv") + + write_header = not os.path.exists(file_name) + + df.to_csv(file_name, mode='a', header=write_header, index=False) + # Check if the file exists + + def log_detection_par(self, true_positives, false_negatives, false_positives): + """ + Logs the detection parameters for mAP calculation + to a CSV file in a thread-safe manner. + + Args: + true_positives: detected and matched + false_negatives: GT objects not matched + false_positives: Perceived objects not matched) + + """ + # timestamp = datetime.datetime.now().isoformat() + timestamp = time.time_ns() + data_row = [true_positives, false_negatives, false_positives] + + # Acquire the lock before performing file operations + with self.confidence_csv_lock: + try: + file_exists = os.path.isfile(self.detection_csv_file) + # Check if file is empty (it might exist but have no content/headers) + is_empty_file = file_exists and os.path.getsize(self.detection_csv_file) == 0 + + with open(self.detection_csv_file, mode='a', newline='') as csvfile: + writer = csv.writer(csvfile) + if not file_exists or is_empty_file: + # Write header only if it's a new or empty file + writer.writerow(self.detection_csv_header) + writer.writerow(data_row) + except IOError as e: + print( + f"Thread ID {threading.get_ident()}: IOError writing to {self.detection_csv_file}. Reason: {e}") + except Exception as e: + print( + f"Thread ID {threading.get_ident()}: Unexpected error during CSV writing to {self.detection_csv_file}. 
Reason: {e}") diff --git a/opencda/customize/core/common/vehicle_manager.py b/opencda/customize/core/common/vehicle_manager.py index f8c76839f..4c10d1e11 100644 --- a/opencda/customize/core/common/vehicle_manager.py +++ b/opencda/customize/core/common/vehicle_manager.py @@ -29,6 +29,10 @@ from opencda.customize.platooning.states import FSM from opencda.customize.platooning.states import I_FSM import time +import collections +import csv +import os +from pathlib import Path class ExtendedVehicleManager(VehicleManager): @@ -160,6 +164,8 @@ def __init__( if not self.pldm: self.LDM = LDM(self, self.v2xAgent, visualize=self.lidar_visualize) + + def get_time_ms(self): return self.map_manager.world.get_snapshot().elapsed_seconds * 1000 @@ -180,6 +186,7 @@ def update_info_LDM(self): objects = self.perception_manager.detect(ego_pos) self.sensorObjects = objects['vehicles'] detected_n = len(objects['vehicles']) + file_detection = (time.time_ns() / 1000) - file_timestamp # ------------------LDM patch------------------------ self.time = self.map_manager.world.get_snapshot().elapsed_seconds @@ -246,13 +253,13 @@ def get_context(self): def translateDetections(self, object_list): ego_pos, ego_spd, objects = self.getInfo() - returnedObjects = [] + returnedObjectsV = [] for obj in object_list['vehicles']: if obj.carla_id == -1: continue # If object can't be matched with a CARLA vehicle, we ignore it - dist = math.sqrt( - math.pow((obj.location.x - ego_pos.location.x), 2) + math.pow((obj.location.y - ego_pos.location.y), - 2)) + # dist = math.sqrt( + # math.pow((obj.location.x - ego_pos.location.x), 2) + math.pow((obj.location.y - ego_pos.location.y), + # 2)) # yaw = self.perception_manager.carla_world.get_actors().find(obj.carla_id).get_transform().rotation.yaw # print("Carla yaw: ", yaw, " Opencda yaw: ", obj.yaw) LDMobj = Perception(obj.location.x, @@ -263,13 +270,87 @@ def translateDetections(self, object_list): obj.confidence) LDMobj.xSpeed = obj.velocity.x LDMobj.ySpeed = obj.velocity.y - # LDMobj.yaw = obj.yaw - # TODO: this is a workaround, pose estimation in o3d_pointcloud.get_oriented_bounding_box() is not reliable - curr_wpt = self.map_manager.world.get_map().get_waypoint(carla.Location(x=obj.location.x, y=obj.location.y, z=0.0)) - LDMobj.yaw = curr_wpt.transform.rotation.yaw + LDMobj.yaw = obj.yaw + LDMobj.id = obj.carla_id + returnedObjectsV.append(LDMobj) + + returnedObjectsP = [] + for obj in object_list['VRU']: + if obj.carla_id == -1: + continue + # dist = math.sqrt( + # math.pow((obj.location.x - ego_pos.location.x), 2) + math.pow((obj.location.y - ego_pos.location.y), + # 2)) + # yaw = self.perception_manager.carla_world.get_actors().find(obj.carla_id).get_transform().rotation.yaw + # print("Carla yaw: ", yaw, " Opencda yaw: ", obj.yaw) + LDMobj = Perception(obj.location.x, + obj.location.y, + obj.bounding_box.extent.y * 2, # width + obj.bounding_box.extent.x * 2, # length + self.time, + obj.confidence, + itsType=obj.itsType) + LDMobj.xSpeed = obj.velocity.x + LDMobj.ySpeed = obj.velocity.y + LDMobj.yaw = obj.yaw + LDMobj.id = obj.carla_id + returnedObjectsP.append(LDMobj) + # print(f"Updating LDM - ID {obj.carla_id} ") + # ped_list is retrieved from CARLA, while returnedObjects is the info from sensors. 
+ # if we sense a pedestrian close to a location where the server is telling us that there is a pedestrian + # we assign the ID to the object obstacle pedestrian + + + return {'vehicles': returnedObjectsV, + 'VRU': returnedObjectsP} + def translateDetectionsPedestrians(self, object_list): + ego_pos, ego_spd, objects = self.getInfo() + returnedObjects = [] + for obj in object_list['VRU']: + # dist = math.sqrt( + # math.pow((obj.location.x - ego_pos.location.x), 2) + math.pow((obj.location.y - ego_pos.location.y), + # 2)) + # yaw = self.perception_manager.carla_world.get_actors().find(obj.carla_id).get_transform().rotation.yaw + # print("Carla yaw: ", yaw, " Opencda yaw: ", obj.yaw) + if obj.itsType != 'pedestrian': + continue + LDMobj = Perception(obj.location.x, + obj.location.y, + obj.bounding_box.extent.y * 2, + obj.bounding_box.extent.x * 2, + self.time, + obj.confidence, + itsType=obj.itsType) + LDMobj.xSpeed = 0 + LDMobj.ySpeed = 0 + LDMobj.yaw = 0 LDMobj.id = obj.carla_id returnedObjects.append(LDMobj) - return {'vehicles': returnedObjects} + + # ped_list is retrieved from CARLA, while returnedObjects is the info from sensors. + # if we sense a pedestrian close to a location where the server is telling us that there is a pedestrian + # we assign the ID to the object obstacle pedestrian + + ped_list = self.map_manager.world.get_actors() + ped_list = [p for p in ped_list if p.get_location().distance(ego_pos.location) < 50] + + for i, walker in enumerate(returnedObjects): + for p in ped_list: + p_loc = p.get_location() + if abs(p_loc.x - walker.xPosition) <= 3.0 and \ + abs(p_loc.y - walker.yPosition) <= 3.0: + x = round(walker.xPosition, 2) + y = round(walker.yPosition, 2) + dist = round(math.sqrt( + math.pow((x - ego_pos.location.x), 2) + math.pow((y - ego_pos.location.y), + 2)), 2) + print(f'Pedestrian {p.id} detected {dist} meters away by vehicle {self.vehicle.id}' + f' at position (x = {x}, y = {y})') + # assign pedestrian ID to obstacle pedestrian object + walker.id = p.id + + + return def getInfo(self): """ @@ -494,3 +575,5 @@ def appendObject(self, obj, id): obj.o3d_bbx = self.LDMobj_to_o3d_bbx(obj) self.LDM[id].insertPerception(obj) + + diff --git a/opencda/customize/core/sensing/perception/perception_manager.py b/opencda/customize/core/sensing/perception/perception_manager.py index 911ffd513..f12753d18 100644 --- a/opencda/customize/core/sensing/perception/perception_manager.py +++ b/opencda/customize/core/sensing/perception/perception_manager.py @@ -209,6 +209,7 @@ def dist(self, a): def detect(self, ego_pos): """ Detect surrounding objects. Currently only vehicle detection supported. + Implementation for pedestrians detection ongoing. 
Parameters ---------- @@ -224,7 +225,8 @@ def detect(self, ego_pos): self.ego_pos = ego_pos objects = {'vehicles': [], - 'traffic_lights': []} + 'traffic_lights': [], + 'VRU': []} if not self.activate: objects = self.deactivate_mode(objects) diff --git a/opencda/customize/ml_libs/ml_manager.py b/opencda/customize/ml_libs/ml_manager.py index 36c03105a..720cfc3d4 100644 --- a/opencda/customize/ml_libs/ml_manager.py +++ b/opencda/customize/ml_libs/ml_manager.py @@ -66,6 +66,12 @@ def draw_2d_box(self, result, rgb_image, index): if is_vehicle_cococlass(label): label_name = 'vehicle' + elif is_pedestrian_cococlass(label): + label_name = 'pedestrian' + elif label == 1: + label_name = 'bicycle' + elif label == 3: + label_name = 'motorcycle' x1, y1, x2, y2 = int( detection[0]), int( @@ -90,5 +96,18 @@ def is_vehicle_cococlass(label): -is_vehicle: bool whether this label belongs to the vehicle class """ - vehicle_class_array = np.array([1, 2, 3, 5, 7], dtype=np.int) + vehicle_class_array = np.array([2, 5, 7], dtype=np.int) return True if 0 in (label - vehicle_class_array) else False + +def is_pedestrian_cococlass(label): + """ + Check whether the label belongs to the pedestrian class according + to coco dataset. + Args: + -label(int): yolo detection prediction. + Returns: + -is_pedestrian: bool + whether this label belongs to the pedestrian class + """ + VRU_class_array = np.array([0], dtype=np.int) + return True if 0 in (label - VRU_class_array) else False diff --git a/opencda/customize/v2x/LDM.py b/opencda/customize/v2x/LDM.py index 1351b916a..e82e4410c 100644 --- a/opencda/customize/v2x/LDM.py +++ b/opencda/customize/v2x/LDM.py @@ -9,8 +9,12 @@ o3d_visualizer_init, o3d_pointcloud_encode, o3d_visualizer_show, o3d_visualizer_showLDM from opencda.core.sensing.perception.obstacle_vehicle import \ ObstacleVehicle +from opencda.core.sensing.perception.obstacle_pedestrian import \ + ObstacleVRU import opencda.core.sensing.perception.sensor_transformation as st import csv +import os +import pandas as pd from opencda.customize.v2x.LDMutils import LDM_to_lidarObjects from opencda.customize.v2x.LDMutils import matchLDMobject @@ -21,7 +25,10 @@ from scipy.optimize import linear_sum_assignment as linear_assignment from opencda.customize.v2x.aux import newLDMentry from opencda.customize.v2x.aux import Perception + from opencda.customize.v2x.LDMutils import PO_kalman_filter +from opencda.customize.v2x.LDMutils import pedestrian_kalman_filter + from opencda.customize.v2x.LDMutils import obj_to_o3d_bbx from opencda.customize.v2x.aux import ColorGradient @@ -72,11 +79,99 @@ def match_LDM(self, object_list): math.pow((obj.xPosition - LDMpredX), 2) + math.pow( (obj.yPosition - LDMpredY), 2)) iou = compute_IoU(LDMpredbbx, object_list[j].o3d_bbx) - try: - iou = compute_IoU_lineSet(LDMpredline_set, object_list[j].line_set) - except RuntimeError as e: - # print("Unable to compute the oriented bounding box:", e) - pass + # try: + # iou = compute_IoU_lineSet(LDMpredline_set, object_list[j].line_set) + # except RuntimeError as e: + # # print("Unable to compute the oriented bounding box:", e) + # pass + if iou > 1: + print("Error") + if iou > 0: + IoU_map[i, j] = iou + elif dist < 1: # elif dist < 3 -->IoU_map[i, j] = dist - 1000 + IoU_map[i, j] = 0 + else: + IoU_map[i, j] = -1000 + i += 1 + ldm_ids.append(ID) + + matched, new = linear_assignment(-IoU_map) + return IoU_map, new, matched, ldm_ids + return None, None, None, None + + def match_LDM_vehicles(self, object_list): + LDM_vehicles = {} + for ID, LDMobj in self.LDM.items():
if LDMobj.itsType == 'vehicle': + LDM_vehicles[ID] = LDMobj + + if len(self.LDM) != 0: + IoU_map = np.zeros((len(LDM_vehicles), len(object_list)), dtype=np.float32) + i = 0 + ldm_ids = [] + for ID, LDMobj in LDM_vehicles.items(): + for j in range(len(object_list)): + obj = object_list[j] + object_list[j].o3d_bbx, object_list[j].line_set = self.cav.LDMobj_to_o3d_bbx(obj) + LDMpredX = LDMobj.perception.xPosition + LDMpredY = LDMobj.perception.yPosition + LDMpredbbx, LDMpredline_set = get_o3d_bbx(self.cav, LDMpredX, LDMpredY, LDMobj.perception.width, + LDMobj.perception.length, LDMobj.perception.yaw) + + dist = math.sqrt( + math.pow((obj.xPosition - LDMpredX), 2) + math.pow( + (obj.yPosition - LDMpredY), 2)) + iou = compute_IoU(LDMpredbbx, object_list[j].o3d_bbx) + # try: + # iou = compute_IoU_lineSet(LDMpredline_set, object_list[j].line_set) + # except RuntimeError as e: + # # print("Unable to compute the oriented bounding box:", e) + # pass + if iou > 1: + print("Error") + if iou > 0: + IoU_map[i, j] = iou + elif dist < 3: # if dist < 3 --> IoU_map[i, j] = 0 + IoU_map[i, j] = dist - 1000 + else: + IoU_map[i, j] = -1000 + i += 1 + ldm_ids.append(ID) + + matched, new = linear_assignment(-IoU_map) + return IoU_map, new, matched, ldm_ids + return None, None, None, None + + def match_LDM_VRU(self, object_list): + + LDM_VRU = {} + for ID, LDMobj in self.LDM.items(): + if LDMobj.itsType in ['pedestrian', 'bicycle', 'motorcycle']: + LDM_VRU[ID] = LDMobj + + if len(self.LDM) != 0: + IoU_map = np.zeros((len(LDM_VRU), len(object_list)), dtype=np.float32) + i = 0 + ldm_ids = [] + # for ID, LDMobj in self.LDM.items(): + for ID, LDMobj in LDM_VRU.items(): + for j in range(len(object_list)): + obj = object_list[j] + object_list[j].o3d_bbx, object_list[j].line_set = self.cav.LDMobj_to_o3d_bbx(obj) + LDMpredX = LDMobj.perception.xPosition + LDMpredY = LDMobj.perception.yPosition + LDMpredbbx, LDMpredline_set = get_o3d_bbx(self.cav, LDMpredX, LDMpredY, LDMobj.perception.width, + LDMobj.perception.length, LDMobj.perception.yaw) + + dist = math.sqrt( + math.pow((obj.xPosition - LDMpredX), 2) + math.pow( + (obj.yPosition - LDMpredY), 2)) + iou = compute_IoU(LDMpredbbx, object_list[j].o3d_bbx) + # try: + # iou = compute_IoU_lineSet(LDMpredline_set, object_list[j].line_set) + # except RuntimeError as e: + # # print("Unable to compute the oriented bounding box:", e) + # pass if iou > 0: IoU_map[i, j] = iou elif dist < 3: # if dist < 3 --> IoU_map[i, j] = 0 @@ -92,18 +187,35 @@ def match_LDM(self, object_list): def updateLDM(self, object_list): # Predict position of current LDM tracks before attempting to match for ID, LDMobj in self.LDM.items(): - # diff = self.cav.time - LDMobj.perception.timestamp self.LDM[ID].onSight = False # It will go back to True when appending if we match it - LDMobj.perception.xPosition, \ - LDMobj.perception.yPosition, \ - LDMobj.perception.xSpeed, \ - LDMobj.perception.ySpeed, \ - LDMobj.perception.xacc, \ - LDMobj.perception.yacc = self.LDM[ID].kalman_filter.predict(self.cav.get_time_ms()) + + if (LDMobj.itsType == "pedestrian"): + LDMobj.perception.xPosition, \ + LDMobj.perception.yPosition, \ + LDMobj.perception.xSpeed, \ + LDMobj.perception.ySpeed = self.LDM[ID].kalman_filter.predict(self.cav.get_time_ms()) + + + self.log_kalman_prediction({ + 'Timestamp': self.cav.get_time(), + 'ID': ID, + 'Prediction_x': LDMobj.perception.xPosition, + 'Prediction_y': LDMobj.perception.yPosition + }) + + else: + LDMobj.perception.xPosition, \ + LDMobj.perception.yPosition, \ + 
LDMobj.perception.xSpeed, \ + LDMobj.perception.ySpeed, \ + LDMobj.perception.xacc, \ + LDMobj.perception.yacc = self.LDM[ID].kalman_filter.predict(self.cav.get_time_ms()) + LDMobj.perception.o3d_bbx, LDMobj.perception.line_set = self.cav.LDMobj_to_o3d_bbx(LDMobj.perception) - LDMobj.perception.timestamp = self.cav.time + LDMobj.perception.timestamp = self.cav.get_time() - IoU_map, new, matched, ldm_ids = self.match_LDM(object_list['vehicles']) + + IoU_map, new, matched, ldm_ids = self.match_LDM_vehicles(object_list['vehicles']) for j in range(len(object_list['vehicles'])): obj = object_list['vehicles'][j] obj.o3d_bbx, obj.line_set = self.cav.LDMobj_to_o3d_bbx(obj) @@ -117,14 +229,49 @@ def updateLDM(self, object_list): newID = obj.id if obj.id == 1: print('newID:', newID) - self.LDM[newID] = newLDMentry(obj, newID, connected=False, onSight=True) + self.LDM[newID] = newLDMentry(obj, newID, detected=True, onSight=True) + # self.LDM[newID].kalman_filter = PO_kalman_filter(Q_noise_std_dev=5, R_std_dev=0.5) self.LDM[newID].kalman_filter = PO_kalman_filter() self.LDM[newID].kalman_filter.init_step(obj.xPosition, obj.yPosition, - vx=self.cav.vehicle.get_velocity().x, - vy=self.cav.vehicle.get_velocity().y, - ax=self.cav.vehicle.get_acceleration().x, - ay=self.cav.vehicle.get_acceleration().y) + vx=obj.xSpeed, + vy=obj.ySpeed, + ax=obj.xacc, + ay=obj.yacc) + + IoU_map, new, matched, ldm_ids = self.match_LDM_VRU(object_list['VRU']) + for j in range(len(object_list['VRU'])): + obj = object_list['VRU'][j] + obj.o3d_bbx, obj.line_set = self.cav.LDMobj_to_o3d_bbx(obj) + if IoU_map is not None: + matchedObj = matched[np.where(new == j)[0]] + if IoU_map[matchedObj, j] != -1000: + self.appendObject(obj, ldm_ids[matchedObj[0]]) + continue + # we are detecting a new object + # newID = self.LDM_ids.pop() + newID = obj.id + if obj.id == 1: + print('newID:', newID) + self.LDM[newID] = newLDMentry(obj, newID, detected=True, onSight=True) + # self.LDM[newID].kalman_filter = PO_kalman_filter(Q_noise_std_dev=0.5, R_std_dev=0.5) + # self.LDM[newID].kalman_filter = PO_kalman_filter() + if self.LDM[newID].itsType == 'motorcycle' or self.LDM[newID].itsType == 'bicycle': + self.LDM[newID].kalman_filter = PO_kalman_filter() + self.LDM[newID].kalman_filter.init_step(x=obj.xPosition, + y=obj.yPosition, + vx=obj.xSpeed, + vy=obj.ySpeed, + ax=obj.xacc, + ay=obj.yacc) + else: + self.LDM[newID].kalman_filter = pedestrian_kalman_filter() + self.LDM[newID].kalman_filter.init_step(x=obj.xPosition, + y=obj.yPosition, + vx=obj.xSpeed, + vy=obj.ySpeed, + now=self.cav.get_time_ms()) + # Delete old perceptions if self.cav.time > 2.0: @@ -134,10 +281,20 @@ def updateLDM(self, object_list): del self.LDM[ID] self.LDM_ids.add(ID) + # delete old connected objects if self.cav.time > 2.0: T = self.cav.time - 0.5 old_ids = [ID for ID, LDMobj in self.LDM.items() if LDMobj.getLatestPoint().timestamp <= T - and (self.cav.vehicle.id not in LDMobj.perceivedBy or not LDMobj.tracked)] + and (self.cav.vehicle.id not in LDMobj.perceivedBy or not LDMobj.tracked) and not LDMobj.itsType == "pedestrian"] + for ID in old_ids: + del self.LDM[ID] + self.LDM_ids.add(ID) + + # delete old connected pedestrians + if self.cav.time > 2.0: + T = self.cav.time - 3 + old_ids = [ID for ID, LDMobj in self.LDM.items() if LDMobj.getLatestPoint().timestamp <= T + and (self.cav.vehicle.id not in LDMobj.perceivedBy or not LDMobj.tracked) and LDMobj.itsType == "pedestrian"] for ID in old_ids: del self.LDM[ID] self.LDM_ids.add(ID) @@ -164,22 +321,26 @@ def updateLDM(self, 
object_list): gt) def LDM_to_lidarObjects(self): - lidarObjects = [] + lidarObjectsVeh = [] + lidarObjectsVRU = [] for ID, LDMobj in self.LDM.items(): egoPos = self.cav.localizer.get_ego_pos() dist = math.sqrt(math.pow((LDMobj.perception.xPosition - egoPos.location.x), 2) + math.pow((LDMobj.perception.yPosition - egoPos.location.y), 2)) if dist < 100: - lidarObjects.append(LDMobj) # return last sample of each object in LDM - return {'vehicles': lidarObjects} + if LDMobj.itsType == 'vehicle': + lidarObjectsVeh.append(LDMobj) # return last sample of each object in LDM + else: + lidarObjectsVRU.append(LDMobj) + return {'vehicles': lidarObjectsVeh, 'VRU': lidarObjectsVRU} def clean_duplicates(self): - objects = [obj.perception for obj in self.LDM.values()] + objects = [obj.perception for obj in self.LDM.values() if obj.itsType != "pedestrian"] IoU_map, new, matched, ldm_ids = self.match_LDM(objects) indices_to_delete = [] for i in range(len(objects)): for j in range(i + 1, len(objects)): - if IoU_map[i][j] > 0 and not self.LDM[ldm_ids[j]].connected: + if IoU_map[i][j] > 0 and self.LDM[ldm_ids[j]].detected: indices_to_delete.append(j) indices_to_delete = list(set(indices_to_delete)) @@ -196,7 +357,7 @@ def appendObject(self, obj, id): self.LDM[id].onSight = True # Compute the estimated heading angle obj.heading = obj.yaw - if self.LDM[id].connected is True: + if self.LDM[id].detected is False or self.LDM[id].VAM is True: # If this entry is of a connected vehicle # obj.connected = True # We do this to take the width and length from CAM always obj.width = self.LDM[id].perception.width @@ -214,19 +375,61 @@ def appendObject(self, obj, id): obj.width = width_max obj.length = length_max yaw_list = [obj.yaw] + xpos = [obj.xPosition] + ypos = [obj.yPosition] for prev_obj in self.LDM[id].pathHistory: yaw_list.append(prev_obj.yaw) + xpos.append(prev_obj.xPosition) + ypos.append(prev_obj.yPosition) obj.yaw = np.mean(yaw_list) + delta_x = xpos[0]-xpos[-1] + delta_y = ypos[0]-ypos[-1] + heading_rad = math.atan2(delta_y, delta_x) + heading_deg = math.degrees(heading_rad) + obj.yaw = heading_deg + obj.o3d_bbx, obj.line_set = LDMobj_to_o3d_bbx(self.cav, obj) - x, y, vx, vy, ax, ay = self.LDM[id].kalman_filter.update(obj.xPosition, obj.yPosition, self.cav.get_time_ms()) - obj.xPosition = x - obj.yPosition = y - obj.xSpeed = vx - obj.ySpeed = vy - obj.xacc = ax - obj.yacc = ay + if self.LDM[id].itsType == 'pedestrian': + x, y, vx, vy = self.LDM[id].kalman_filter.update(obj.xPosition, obj.yPosition, self.cav.get_time_ms()) + else: + x, y, vx, vy, ax, ay = self.LDM[id].kalman_filter.update(obj.xPosition, obj.yPosition, self.cav.get_time_ms()) + + gt = self.cav.perception_manager.getGTobjects() + for key,list in gt.items(): + if key == "VRU": + for item in list: + if item.carla_id == id: + # print("ID: ", id) + # print('Measurement: ', "x: ", obj.xPosition, ",y: ", obj.yPosition) + # print("GT: ", "x: ", item.location.x, ",y: ", item.location.y) + # print('KFupdate: ', "x: ", x, ",y: ", y) + self.log_kalman_data({ + 'Timestamp' : obj.timestamp, + 'ID': id, + 'Measurement_x' : obj.xPosition, + 'Measurement_y': obj.yPosition, + 'GT_x' : item.location.x, + 'GT_y' : item.location.y, + 'KFupdate_x' : x, + 'KFupdate_y' : y + }) + + if self.LDM[id].itsType == 'pedestrian': + obj.xPosition = x + obj.yPosition = y + + else: + obj.xPosition = x + obj.yPosition = y + obj.xSpeed = vx + obj.ySpeed = vy + + # obj.xSpeed = vx + # obj.ySpeed = vy + # obj.xacc = ax + # obj.yacc = ay # obj.confidence = self.computeGT_accuracy(obj, 
id) self.LDM[id].insertPerception(obj) @@ -264,11 +467,11 @@ def cleanDuplicates(self): if matchedId != -1: if LDMobj.PLU is True and self.LDM[matchedId].PLU is False: duplicates.append(matchedId) - elif LDMobj.connected is False and self.LDM[matchedId].connected: + elif LDMobj.detected is True and self.LDM[matchedId].detected is False: duplicates.append(matchedId) - elif LDMobj.connected is False and LDMobj.timestamp > self.LDM[matchedId].timestamp: + elif LDMobj.detected is True and LDMobj.timestamp > self.LDM[matchedId].timestamp: duplicates.append(matchedId) - elif LDMobj.connected is False and \ + elif LDMobj.detected is True and \ (LDMobj.width + LDMobj.length) > (self.LDM[matchedId].width + self.LDM[matchedId].length): duplicates.append(matchedId) elif LDMobj.connected is True: @@ -295,10 +498,86 @@ def getCPM(self): def getAllPOs(self): POs = [] for ID, LDMobj in self.LDM.items(): - if not LDMobj.connected: + if LDMobj.detected: POs.append(LDMobj) return POs + def VAMfusion(self, VAMobject): + VAMobject.connected = True + ego_pos, ego_spd, objects = self.cav.getInfo() + + #todo: adjust the confidence value coming fromm VAMs + VAMobject.confidence = 0.8 + + # if self.last_update > VAMobject.timestamp: + # diff = self.last_update - VAMobject.timestamp + # # If it's an old perception, we need to predict its current position + # VAMobject.xPosition += VAMobject.xSpeed * diff + 0.5 * VAMobject.xacc * (diff ** 2) + # VAMobject.yPosition += VAMobject.ySpeed * diff + 0.5 * VAMobject.yacc * (diff ** 2) + # + # #and we penalize the confidence + # VAMobject.confidence *= math.exp(-diff) + + VAMobject.o3d_bbx, VAMobject.line_set = get_o3d_bbx(self.cav, + VAMobject.xPosition, + VAMobject.yPosition, + VAMobject.width, + VAMobject.length, + VAMobject.yaw) + + if self.LDM.get(VAMobject.id) and self.LDM[VAMobject.id].VAM: + print(self.cav.get_time(), f" - Not the first VAM - id {VAMobject.id}") + # if it is not the first time we receive this VAM + self.LDM[VAMobject.id].kalman_filter.predict(self.cav.get_time_ms()) + x, y, vx, vy = self.LDM[VAMobject.id].kalman_filter.update(VAMobject.xPosition, + VAMobject.yPosition, + self.cav.get_time_ms()) + # print('KFupdate: ', "x: ", x, ",y: ", y, ",vx: ", vx, ",vy: ", vy, ",ax: ", ax, ",ay: ", ay) + + VAMobject.xPosition = x + VAMobject.yPosition = y + # VAMobject.xSpeed = vx + # VAMobject.ySpeed = vy + # VAMobject.xacc = ax + # VAMobject.yacc = ay + self.LDM[VAMobject.id].insertPerception(VAMobject) + + else: #it is the first time we receive this VAM, we try to match it + IoU_map, new, matched, ldm_ids = self.match_LDM([VAMobject]) + + if IoU_map is not None: + if IoU_map[matched[0], new[0]] >= 0.2: + # We are also perceiving this object - or it is another pedestrian sending VAM + # check if the id that we matched is connected or not, if so, we matched wrongly + if not self.LDM[ldm_ids[matched[0]]].VAM: + print(self.cav.get_time(), " - first time VAM - appending to LDM") + self.append_VAM_object(VAMobject, ldm_ids[matched[0]]) + return + dist = math.sqrt( + math.pow((VAMobject.xPosition - ego_pos.location.x), 2) + math.pow( + (VAMobject.yPosition - ego_pos.location.y), 2)) + + if dist < 3: + return + # newID = self.LDM_ids.pop() # TODO: solve ms-van3t api resulting in changing POids from same cav + print(self.cav.get_time(), f" - New entry - ID {VAMobject.id}") + print("speed x = ", VAMobject.xSpeed, " y = ", VAMobject.ySpeed) + newID = VAMobject.id + self.LDM[newID] = newLDMentry(VAMobject, newID, detected=False, onSight=False) #detected=false + 
self.LDM[newID].CPM = False + self.LDM[newID].VAM = True + # self.LDM[newID].perceivedBy.append(VAMobject) + # self.LDM[newID].kalman_filter = PO_kalman_filter(Q_noise_std_dev=0.5, R_std_dev=0.5) + # self.LDM[newID].kalman_filter = PO_kalman_filter() + self.LDM[newID].kalman_filter = pedestrian_kalman_filter() + self.LDM[newID].kalman_filter.init_step(x=VAMobject.xPosition, + y=VAMobject.yPosition, + vx=VAMobject.xSpeed, + vy=VAMobject.ySpeed, + now=self.cav.get_time_ms()) + + + def CAMfusion(self, CAMobject): CAMobject.connected = True CAMobject.o3d_bbx, CAMobject.line_set = get_o3d_bbx(self.cav, @@ -308,20 +587,19 @@ def CAMfusion(self, CAMobject): CAMobject.length, CAMobject.yaw) if CAMobject.id in self.LDM: - # If this is not the first CAM from this vehicle + # If this is not the first CAM self.LDM[CAMobject.id].kalman_filter.predict(self.cav.get_time_ms()) x, y, vx, vy, ax, ay = self.LDM[CAMobject.id].kalman_filter.update(CAMobject.xPosition, CAMobject.yPosition, self.cav.get_time_ms()) # print('KFupdate: ', "x: ", x, ",y: ", y, ",vx: ", vx, ",vy: ", vy, ",ax: ", ax, ",ay: ", ay) CAMobject.xPosition = x CAMobject.yPosition = y - # We trust the CAM speed - # CAMobject.xSpeed = vx - # CAMobject.ySpeed = vy + CAMobject.xSpeed = vx + CAMobject.ySpeed = vy CAMobject.xacc = ax CAMobject.yacc = ay + self.LDM[CAMobject.id].detected = False self.LDM[CAMobject.id].insertPerception(CAMobject) - self.LDM[CAMobject.id].connected = True else: # If this is the first CAM, check if we are already perceiving it IoU_map, new, matched, ldm_ids = self.match_LDM([CAMobject]) @@ -333,7 +611,8 @@ def CAMfusion(self, CAMobject): # Create new entry if CAMobject.id in self.cav.LDM_ids: self.cav.LDM_ids.remove(CAMobject.id) - self.LDM[CAMobject.id] = newLDMentry(CAMobject, CAMobject.id, connected=True, onSight=True) + self.LDM[CAMobject.id] = newLDMentry(CAMobject, CAMobject.id, detected=False, onSight=True) + # self.LDM[CAMobject.id].kalman_filter = PO_kalman_filter(Q_noise_std_dev=5, R_std_dev=0.5) self.LDM[CAMobject.id].kalman_filter = PO_kalman_filter() self.LDM[CAMobject.id].kalman_filter.init_step(CAMobject.xPosition, CAMobject.yPosition, @@ -394,22 +673,32 @@ def CPMfusion(self, object_list, fromID): continue # newID = self.LDM_ids.pop() # TODO: solve ms-van3t api resulting in changing POids from same cav newID = CPMobj.id - self.LDM[newID] = newLDMentry(CPMobj, newID, connected=False, onSight=False) + self.LDM[newID] = newLDMentry(CPMobj, newID, detected=True, onSight=False) self.LDM[newID].CPM = True self.LDM[newID].perceivedBy.append(fromID) - self.LDM[newID].kalman_filter = PO_kalman_filter() - self.LDM[newID].kalman_filter.init_step(CPMobj.xPosition, - CPMobj.yPosition, - CPMobj.xSpeed, - CPMobj.ySpeed, - CPMobj.xacc, - CPMobj.yacc) + # self.LDM[newID].kalman_filter = PO_kalman_filter() + if CPMobj.itsType == "pedestrian": + self.LDM[newID].kalman_filter = pedestrian_kalman_filter() + self.LDM[newID].kalman_filter.init_step(x=CPMobj.xPosition, + y=CPMobj.yPosition, + vx=CPMobj.xSpeed, + vy=CPMobj.ySpeed, + now=self.cav.get_time_ms()) + else: + self.LDM[newID].kalman_filter = PO_kalman_filter() + self.LDM[newID].kalman_filter.init_step(CPMobj.xPosition, + CPMobj.yPosition, + CPMobj.xSpeed, + CPMobj.ySpeed, + CPMobj.xacc, + CPMobj.yacc) # self.recvCPMmap[fromID][CPMobj.id] = newID + def append_CPM_object(self, CPMobj, id, fromID): if fromID not in self.LDM[id].perceivedBy: self.LDM[id].perceivedBy.append(fromID) - if CPMobj.timestamp < self.LDM[id].getLatestPoint().timestamp - 100: # Consider objects 
up to 100ms old + if CPMobj.timestamp < self.LDM[id].getLatestPoint().timestamp - 0.100: # Consider objects up to 100ms old return newLDMobj = Perception(CPMobj.xPosition, @@ -420,6 +709,8 @@ def append_CPM_object(self, CPMobj, id, fromID): CPMobj.confidence) newLDMobj.yaw = CPMobj.yaw newLDMobj.heading = CPMobj.heading + newLDMobj.itsType = CPMobj.itsType + # If the object is also perceived locally if self.cav.vehicle.id in self.LDM[id].perceivedBy and self.LDM[id].onSight: # Compute weights depending on the POage and confidence (~distance from detecting vehicle) @@ -455,15 +746,19 @@ def append_CPM_object(self, CPMobj, id, fromID): else: self.LDM[id].onSight = False - x, y, vx, vy, ax, ay = self.LDM[id].kalman_filter.update(newLDMobj.xPosition, newLDMobj.yPosition, + if CPMobj.itsType == "pedestrian": + x, y, vx, vy = self.LDM[id].kalman_filter.update(newLDMobj.xPosition, newLDMobj.yPosition, + self.cav.get_time_ms()) + else: + x, y, vx, vy, ax, ay = self.LDM[id].kalman_filter.update(newLDMobj.xPosition, newLDMobj.yPosition, self.cav.get_time_ms()) # print('KFupdate: ', "x: ", x, ",y: ", y, ",vx: ", vx, ",vy: ", vy, ",ax: ", ax, ",ay: ", ay) newLDMobj.xPosition = x newLDMobj.yPosition = y - newLDMobj.xSpeed = vx - newLDMobj.ySpeed = vy - newLDMobj.xacc = ax - newLDMobj.yacc = ay + # newLDMobj.xSpeed = vx + # newLDMobj.ySpeed = vy + # newLDMobj.xacc = ax + # newLDMobj.yacc = ay newLDMobj.o3d_bbx, newLDMobj.line_set = get_o3d_bbx(self.cav, newLDMobj.xPosition, @@ -471,10 +766,80 @@ def append_CPM_object(self, CPMobj, id, fromID): newLDMobj.width, newLDMobj.length, newLDMobj.yaw) - # cav.LDM[id].kalman_filter.update(newLDMobj.xPosition, newLDMobj.yPosition, newLDMobj.width, newLDMobj.length) + self.LDM[id].insertPerception(newLDMobj) - self.LDM[id].CPM = True + # self.LDM[id].CPM = True + def append_VAM_object(self, VAMobj, id): + # VAMobj.id and id can be different + # if the VAM info is older than 100ms with respect to the one stored on the LDM, we do nothing + if VAMobj.timestamp < self.LDM[id].getLatestPoint().timestamp - 0.100: + return + + newLDMobj = Perception(VAMobj.xPosition, + VAMobj.yPosition, + VAMobj.width, + VAMobj.length, + VAMobj.timestamp, + VAMobj.confidence) + newLDMobj.yaw = VAMobj.yaw + newLDMobj.heading = VAMobj.heading + newLDMobj.itsType = VAMobj.itsType + + + # If the object is also perceived locally we "fuse" the information + if self.cav.vehicle.id in self.LDM[id].perceivedBy and self.LDM[id].detected: # .LDM[id].onSight: + # Compute weights depending on the POage and confidence (~distance from detecting vehicle) + LDMobj = self.LDM[id].perception + LDMobj_age = max(0.0, 100 - (self.cav.get_time() - LDMobj.timestamp)) + VAMobj_age = max(0.0, 100 - (self.cav.get_time() - VAMobj.timestamp)) + + score_VAM = VAMobj_age * VAMobj.confidence * 2 + score_LDM = LDMobj_age * LDMobj.confidence + total_score = score_LDM + score_VAM + if total_score == 0: + weightVAM = 0.5 + weightLDM = 0.5 + else: + weightVAM = score_VAM / total_score + weightLDM = score_LDM / total_score + + newLDMobj.xPosition = (LDMobj.xPosition * weightLDM + VAMobj.xPosition * weightVAM) / ( + weightLDM + weightVAM) + newLDMobj.yPosition = (LDMobj.yPosition * weightLDM + VAMobj.yPosition * weightVAM) / ( + weightLDM + weightVAM) + newLDMobj.xSpeed = (VAMobj.xSpeed * weightVAM + LDMobj.xSpeed * weightLDM) / (weightLDM + weightVAM) + newLDMobj.ySpeed = (VAMobj.ySpeed * weightVAM + LDMobj.ySpeed * weightLDM) / (weightLDM + weightVAM) + newLDMobj.width = (VAMobj.width * weightVAM + LDMobj.width * weightLDM) 
/ (weightLDM + weightVAM) + newLDMobj.length = (VAMobj.length * weightVAM + LDMobj.length * weightLDM) / (weightLDM + weightVAM) + newLDMobj.heading = (VAMobj.heading * weightVAM + LDMobj.heading * weightLDM) / (weightLDM + weightVAM) + newLDMobj.yaw = (VAMobj.yaw * weightVAM + LDMobj.yaw * weightLDM) / (weightLDM + weightVAM) + # newLDMobj.confidence = (VAMobj.confidence + LDMobj.confidence) / 2 + newLDMobj.timestamp = self.cav.get_time() + + # self.LDM[id].onSight = True + elif self.LDM[id].CPM: + self.LDM[id].onSight = False + + x, y, vx, vy = self.LDM[id].kalman_filter.update(newLDMobj.xPosition, newLDMobj.yPosition, + self.cav.get_time_ms()) + # print('KFupdate: ', "x: ", x, ",y: ", y, ",vx: ", vx, ",vy: ", vy, ",ax: ", ax, ",ay: ", ay) + newLDMobj.xPosition = x + newLDMobj.yPosition = y + # newLDMobj.xSpeed = vx + # newLDMobj.ySpeed = vy + # newLDMobj.xacc = ax + # newLDMobj.yacc = ay + + newLDMobj.o3d_bbx, newLDMobj.line_set = get_o3d_bbx(self.cav, + newLDMobj.xPosition, + newLDMobj.yPosition, + newLDMobj.width, + newLDMobj.length, + newLDMobj.yaw) + # cav.LDM[id].kalman_filter.update(newLDMobj.xPosition, newLDMobj.yPosition, newLDMobj.width, newLDMobj.length) + self.LDM[id].insertPerception(newLDMobj) + self.LDM[id].VAM = True def getLDM_tracked(self): tracked = {} for ID, LDMobj in self.LDM.items(): @@ -498,6 +863,7 @@ def get_LDM_size(self): def LDM2OpencdaObj(self, trafficLights): LDM = self.getLDM_tracked() retObjects = [] + retObjectsVRU = [] for ID, LDMObject in LDM.items(): corner = np.asarray(LDMObject.perception.o3d_bbx.get_box_points()) # covert back to unreal coordinate @@ -508,8 +874,86 @@ def LDM2OpencdaObj(self, trafficLights): # project to world reference corner = st.sensor_to_world(corner, self.cav.perception_manager.lidar.sensor.get_transform()) corner = corner.transpose()[:, :3] - object = ObstacleVehicle(corner, LDMObject.perception.o3d_bbx) - object.carla_id = LDMObject.id - retObjects.append(object) + if LDMObject.perception.itsType == 'vehicle': + object = ObstacleVehicle(corner, LDMObject.perception.o3d_bbx) + object.carla_id = LDMObject.id + retObjects.append(object) + elif LDMObject.perception.itsType == 'pedestrian': + object = ObstacleVRU(corner, LDMObject.perception.o3d_bbx) + object.carla_id = LDMObject.id + object.itsType = 'pedestrian' + retObjectsVRU.append(object) + elif LDMObject.perception.itsType == 'bicycle': + object = ObstacleVRU(corner, LDMObject.perception.o3d_bbx) + object.carla_id = LDMObject.id + object.itsType = 'bicycle' + retObjectsVRU.append(object) + elif LDMObject.perception.itsType == 'motorcycle': + object = ObstacleVRU(corner, LDMObject.perception.o3d_bbx) + object.carla_id = LDMObject.id + object.itsType = 'motorcycle' + retObjectsVRU.append(object) + return {'vehicles': retObjects, 'traffic_lights': trafficLights, 'VRU': retObjectsVRU} + + def log_kalman_data(self, data_entry, log_directory="kalman_logs"): + """ + Logs Kalman filter data (measurement, ground truth, KF update) to a CSV file. + + Each object ID gets its own CSV file named "kalman_filter_.csv". + The function appends data to existing files or creates new ones. + + Args: + data_entry (dict): A dictionary containing data for a single object at a single timestamp. + Expected keys: 'ID', 'Measurement_x', 'Measurement_y', + 'GT_x', 'GT_y', 'KFupdate_x', 'KFupdate_y'. + log_directory (str): The directory where log files will be saved. + Defaults to "kalman_logs". 
+ """ + # Ensure the log directory exists + if not os.path.exists(log_directory): + os.makedirs(log_directory) + + obj_id = data_entry['ID'] + file_name = os.path.join(log_directory, f"kalman_filter_{obj_id}.csv") + + # Create a DataFrame for the current data entry + # Using a list of dicts for single row to ensure correct DataFrame structure + df = pd.DataFrame([{ + 'timestamp': data_entry['Timestamp'], + 'measurement_x': data_entry['Measurement_x'], + 'measurement_y': data_entry['Measurement_y'], + 'gt_x': data_entry['GT_x'], + 'gt_y': data_entry['GT_y'], + 'kfupdate_x': data_entry['KFupdate_x'], + 'kfupdate_y': data_entry['KFupdate_y'] + }]) + + # Check if the file exists to decide whether to write headers + write_header = not os.path.exists(file_name) + + # Append the data to the CSV file + df.to_csv(file_name, mode='a', header=write_header, index=False) + + def log_kalman_prediction(self, data_entry, log_directory="kalman_prediction"): + log_directory = f"kalman_prediction_{self.cav.vehicle.id}" + if not os.path.exists(log_directory): + os.makedirs(log_directory) + + self.cav.vehicle.id + obj_id = data_entry['ID'] + file_name = os.path.join(log_directory, f"kalman_pred_{obj_id}.csv") + + # Create a DataFrame for the current data entry + # Using a list of dicts for single row to ensure correct DataFrame structure + df = pd.DataFrame([{ + 'timestamp': data_entry['Timestamp'], + 'prediction_x': data_entry['Prediction_x'], + 'prediction_y': data_entry['Prediction_y'], + }]) + + # Check if the file exists to decide whether to write headers + write_header = not os.path.exists(file_name) + + # Append the data to the CSV file + df.to_csv(file_name, mode='a', header=write_header, index=False) - return {'vehicles': retObjects, 'traffic_lights': trafficLights} diff --git a/opencda/customize/v2x/LDMutils.py b/opencda/customize/v2x/LDMutils.py index 4a396e6a6..f1b2a6106 100644 --- a/opencda/customize/v2x/LDMutils.py +++ b/opencda/customize/v2x/LDMutils.py @@ -1,6 +1,6 @@ import threading -from opencda.core.common.vehicle_manager import VehicleManager +# from opencda.core.common.vehicle_manager import VehicleManager from collections import deque import math import numpy as np @@ -18,6 +18,78 @@ from opencda.customize.v2x.aux import PLDMentry from opencda.customize.v2x.aux import Perception +class pedestrian_kalman_filter: + def __init__(self, initial_timestamp=None, measurement_noise_std=0.5, process_noise_strength=0.5): + self.kf = KalmanFilter(dim_x=4, dim_z=2) + self.dt = 0.05 + self.last_update = 0 + self.last_predict = 0 + self.kf.H = np.array([[1, 0, 0, 0], + [0, 0, 1, 0]]) + self.measurement_noise_std = measurement_noise_std + self.kf.R = np.diag([self.measurement_noise_std**2, self.measurement_noise_std**2]) + self.process_noise_strength = process_noise_strength + self.update_F_Q_matrices(self.dt) #initialize F and Q matrices + self.last_timestamp = initial_timestamp + self.is_initialized = False + + + def update_F_Q_matrices(self, dt): + self.kf.F = np.array([[1, dt, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, dt], + [0, 0, 0, 1]]) + dt2 = dt ** 2 + dt3 = dt ** 3 + dt4 = dt ** 4 + + self.kf.Q = self.process_noise_strength * np.array([ + [dt4 / 4, dt3 / 2, 0, 0], + [dt3 / 2, dt2, 0, 0], + [0, 0, dt4 / 4, dt3 / 2], + [0, 0, dt3 / 2, dt2] + ]) + def init_step(self, now, x, y, vx=0, vy=0): + # self.kf.P *= 1e-2 + self.kf.P = np.diag([ + self.measurement_noise_std**2, # Position x uncertainty (from sensor) + 2.0**2, # Velocity x uncertainty (e.g., 10 m/s initial uncertainty) + 
self.measurement_noise_std**2, # Position y uncertainty + 2.0**2 # Velocity y uncertainty + ]) + # Initial state [x, vx, y, vy] + self.kf.x = np.array([[x], [vx], [y], [vy]]) + self.last_timestamp = now + self.is_initialized = True + + + + def predict(self, now=None): + # predict when no vam is received + if not self.is_initialized: + raise RuntimeError("Filter must be initialized with an update before prediction.") + if now is None: + raise ValueError("current_timestamp cannot be None for prediction.") + if now is not None: + dt_actual = (now - self.last_timestamp)/1000 + if dt_actual > 0: + self.update_F_Q_matrices(dt_actual) # Update F and Q based on actual dt + self.kf.predict() # Perform the prediction using the internal KalmanFilter + self.last_timestamp = now # Update the last known timestamp + + return self.kf.x[0, 0], self.kf.x[2, 0], self.kf.x[1, 0], self.kf.x[3, 0] + + def update(self, measurement_x, measurement_y, now): + + self.predict(now) + + # Form the measurement vector + z = np.array([[measurement_x], [measurement_y]]) + self.kf.update(z) # Perform the update using the internal KalmanFilter + + self.last_timestamp = now # Ensure timestamp is accurate after update + + return self.kf.x[0, 0], self.kf.x[2, 0], self.kf.x[1, 0], self.kf.x[3, 0] class speed_kalman_filter: def __init__(self): @@ -48,7 +120,7 @@ def step(self, x, y): class PO_kalman_filter: - def __init__(self, dt=0.05): + def __init__(self, dt=0.05, Q_noise_std_dev=None, R_std_dev=None, ): self.kf = KalmanFilter(dim_x=6, dim_z=2) self.dt = dt # time step self.last_predict = 0 @@ -63,10 +135,20 @@ def __init__(self, dt=0.05): self.kf.H = np.array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0]]) # todo: refine this values - self.kf.R = np.array([[10, 0], - [0, 10]]) - noise_ax = 1 - noise_ay = 1 + if R_std_dev is None: + self.kf.R = np.array([[10, 0], + [0, 10]]) + else: + self.kf.R = np.array([[R_std_dev ** 2, 0], + [0, R_std_dev ** 2]]) + + if Q_noise_std_dev is None: + noise_ax = 1 + noise_ay = 1 + else: + noise_ax = Q_noise_std_dev + noise_ay = Q_noise_std_dev + self.kf.Q = np.array( [[self.dt ** 4 / 4 * noise_ax, 0, self.dt ** 3 / 2 * noise_ax, 0, self.dt ** 2 * noise_ax, 0], [0, self.dt ** 4 / 4 * noise_ay, 0, self.dt ** 3 / 2 * noise_ay, 0, self.dt ** 2 * noise_ay], diff --git a/opencda/customize/v2x/aux.py b/opencda/customize/v2x/aux.py index 677ce7847..9c6ba3869 100644 --- a/opencda/customize/v2x/aux.py +++ b/opencda/customize/v2x/aux.py @@ -2,11 +2,13 @@ import copy -def newLDMentry(perception, id, connected=False, onSight=True): +def newLDMentry(perception, id, detected=True, onSight=True): + # Function to create a new LDMentry from a perception retEntry = LDMentry(id, perception.xPosition, perception.yPosition, perception.width, perception.length, perception.timestamp, perception.confidence, xSpeed=perception.xSpeed, ySpeed=perception.ySpeed, - heading=perception.heading, connected=connected, o3d_bbx=perception.o3d_bbx, onSight=True) + heading=perception.heading, yaw=perception.yaw, detected=detected, o3d_bbx=perception.o3d_bbx, onSight=True, + itsType=perception.itsType) retEntry.insertPerception(retEntry.perception) return retEntry @@ -22,29 +24,30 @@ def newPLDMentry(perception, id, detected=True, onSight=True): class LDMentry: - def __init__(self, id, xPosition, yPosition, width, length, timestamp, confidence, xSpeed=0, ySpeed=0, heading=0, - connected=False, o3d_bbx=None, onSight=True): + def __init__(self, id, xPosition, yPosition, width, length, timestamp, confidence, xSpeed=0, ySpeed=0, heading=0, 
yaw=0, + detected=True, o3d_bbx=None, onSight=True, itsType ='vehicle'): self.perception = Perception(xPosition, yPosition, width, length, timestamp, confidence, - xSpeed=xSpeed, ySpeed=ySpeed, heading=heading, o3d_bbx=o3d_bbx) - self.perception.connected = connected + xSpeed=xSpeed, ySpeed=ySpeed, heading=heading, yaw=yaw, o3d_bbx=o3d_bbx, itsType=itsType) + self.perception.detected = detected self.pathHistory = deque([], maxlen=10) self.CPM_lastIncluded = None # Last included perception on a CPM # Metadata self.id = id - self.connected = connected # True if we received a CAM from this vehicle + self.detected = detected # False if this is a CV self.onSight = onSight # False if this is obj is not currently on sight - self.CPM = False # True if this object has been perceived from a CPM self.perceivedBy = [] self.kalman_filter = None self.tracked = False # True if this object has been tracked for at least 10 frames - + self.CPM = False # True if this object has been perceived from a CPM + self.itsType = itsType # For now ITStype can be either 'vehicle' or 'pedestrian' + self.VAM = False # True if this object has been perceived from a VAM def insertPerception(self, obj): self.perception = obj if len(self.pathHistory) >= 10: self.pathHistory.popleft() self.pathHistory.append(copy.deepcopy(obj)) - if len(self.pathHistory) == 10 or self.connected or self.CPM: + if len(self.pathHistory) == 10 or not self.detected or self.CPM: self.tracked = True def getLatestPoint(self): @@ -56,8 +59,9 @@ def getOldestPoint(self): class Perception: def __init__(self, xPosition, yPosition, width, length, timestamp, confidence, xSpeed=0, ySpeed=0, heading=0, - o3d_bbx=None, dxSpeed=0, dySpeed=0, fromID=None, ID=None, yaw = 0): + o3d_bbx=None, dxSpeed=0, dySpeed=0, fromID=None, ID=None, yaw=0, itsType='vehicle'): self.id = ID + self.itsType = itsType self.xPosition = xPosition self.yPosition = yPosition self.width = width @@ -78,7 +82,6 @@ def __init__(self, xPosition, yPosition, width, length, timestamp, confidence, x self.dySpeed = dySpeed self.xacc = 0 self.yacc = 0 - self.connected = False self.fromID = fromID @@ -153,3 +156,4 @@ def generate_gradient(self): gradient.append(color) return gradient + diff --git a/opencda/scenario_testing/config_yaml/default.yaml b/opencda/scenario_testing/config_yaml/default.yaml index 7fcf5ce67..046998388 100644 --- a/opencda/scenario_testing/config_yaml/default.yaml +++ b/opencda/scenario_testing/config_yaml/default.yaml @@ -27,7 +27,7 @@ rsu_base: perception: activate: false # when not activated, objects positions will be retrieved from server directly camera: - visualize: 4 # how many camera images need to be visualized. 0 means no visualization for camera + visualize: 0 # how many camera images need to be visualized. 0 means no visualization for camera num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) # relative positions (x,y,z,yaw) of the camera. 
len(positions) should be equal to camera num positions: @@ -39,14 +39,14 @@ rsu_base: visualize: true channels: 32 range: 120 - points_per_second: 1000000 + points_per_second: 560000 rotation_frequency: 20 # the simulation is 20 fps - upper_fov: 2 - lower_fov: -25 + upper_fov: 0 + lower_fov: -20 dropoff_general_rate: 0.3 dropoff_intensity_limit: 0.7 dropoff_zero_intensity: 0.4 - noise_stddev: 0.02 + noise_stddev: 0.0 localization: activate: true # when not activated, ego position will be retrieved from server directly dt: ${world.fixed_delta_seconds} # used for kalman filter @@ -68,15 +68,15 @@ vehicle_base: lidar: # lidar sensor configuration, check CARLA sensor reference for more details visualize: true channels: 32 - range: 50 - points_per_second: 100000 + range: 80 #50 + points_per_second: 655360 #100000 rotation_frequency: 20 # the simulation is 20 fps - upper_fov: 10.0 - lower_fov: -30.0 - dropoff_general_rate: 0.0 + upper_fov: 10 + lower_fov: -30.0 #-30 + dropoff_general_rate: 0.3 dropoff_intensity_limit: 1.0 dropoff_zero_intensity: 0.0 - noise_stddev: 0.0 + noise_stddev: 0.02 localization: activate: false # when not activated, ego position will be retrieved from server directly diff --git a/opencda/scenario_testing/config_yaml/ms_van3t_example_ml.yaml b/opencda/scenario_testing/config_yaml/ms_van3t_example_ml.yaml index 408a8e7b3..0b2dcfea5 100644 --- a/opencda/scenario_testing/config_yaml/ms_van3t_example_ml.yaml +++ b/opencda/scenario_testing/config_yaml/ms_van3t_example_ml.yaml @@ -1,24 +1,97 @@ +# Spawn background vehicles by spawn position +#Transform(Location(x=-87.623032, y=12.967159, z=0.600000), Rotation(pitch=0.000000, yaw=-179.840790, roll=0.000000)) +#Transform(Location(x=-67.254570, y=27.963758, z=0.600000), Rotation(pitch=0.000000, yaw=0.159198, roll=0.000000)) + carla_traffic_manager: global_distance: 4.0 global_speed_perc: -50 ignore_lights_percentage: 0 - vehicle_list: - - spawn_position: - - 131.032 - - 139.51 - - 0.3 - - 0 - - 0 - - 0 - vehicle_speed_perc: -200 - - spawn_position: - - 121.5344 - - 143.51 - - 0.3 - - 0 - - 0 - - 0 - vehicle_speed_perc: -200 +# vehicle_list: +# - spawn_position: +# - -41.825108 +# - -6.604221 +# - 0.6 +# - 0 +# - -90.161217 +# - 0 +# vehicle_speed_perc: -200 +# blueprint: motorcycle +# - spawn_position: +# - -78.034149 +# - 12.967159 +# - 0.6 +# - 0 +# - -179.840790 +# - 0 +# vehicle_speed_perc: -200 +# blueprint: bicycle +# - spawn_position: +# - -67.254570 +# - 27.963758 +# - 0.3 +# - 0 +# - 0 +# - 0 +# vehicle_speed_perc: -200 +# blueprint: Default + + pedestrian_by_radius: true + spawn_list_by_radius: + - n_pedestrian: 50 + radius: 70 + center: [ -58, -15 ] #-58,-15# if we do not specify, radius is around cav1 + same_destination: true +# - n_pedestrian: 30 +# radius: 15 +# center: [-70, 32] #-58,-15# if we do not specify, radius is around cav1 +# same_destination: true +# - n_pedestrian: 30 +# radius: 15 +# center: [ -40,-28 ] # if we do not specify, radius is around cav1 +# same_destination: true +# - n_pedestrian: 30 +# radius: 15 +# center: [ -58,-25 ] # if we do not specify, radius is around cav1 +# same_destination: true + + +# pedestrian_list: +# - spawn_position: +# - -36.741623 +# - -76.261688 +# - 0.177637 +# - spawn_position: +# - -49.452744 +# - -77.609886 +# - 0.177637 +# - spawn_position: +# - -35.085068 +# - -18.718769 +# - 0.177637 +# - spawn_position: +# - -65.538940 +# - 7.304374 +# - 0.177637 +# - spawn_position: +# - -32.596573 +# - -35.497936 +# - 0.177637 +# - spawn_position: #ok +# - -32.952545 +# - 
-3.692704 +# - 0.177637 +# - spawn_position: +# - -75.401207 +# - 5.655548 +# - 0.177637 +# - spawn_position: +# - -31.573521 +# - -45.152374 +# - 0.177637 +# - spawn_position: +# - -21.175884 +# - -76.382408 +# - 0.177637 vehicle_base: sensing: @@ -63,18 +136,18 @@ platoon_base: scenario: town: - name: Town06 + name: Town10HD background_traffic: - vehicle_num: 20 + vehicle_num: 20 # Spawn background vehicles by number (spawn position is randomly generated) single_cav_list: # this is for merging vehicle or single cav without v2x - name: cav1 - spawn_position: [121.7194, 139.51, 0.3, 0, 0, 0] - destination: [606.87, 145.39, 0] + spawn_position: [-69.644844, 24.471010, 0.6, 0, 0.159198, 0] # Transform(Location(x=-64.644844, y=24.471010, z=0.600000), Rotation(pitch=0.000000, yaw=0.0, roll=0.000000)) + destination: [606.87, 145.39, 0] #[-45.1796, -38.7032, 0] sensing: perception: activate: true camera: - visualize: 1 # how many camera images need to be visualized. 0 means no visualization for camera + visualize: 4 # how many camera images need to be visualized. 0 means no visualization for camera num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) # relative positions (x,y,z,yaw) of the camera. len(positions) should be equal to camera num positions: @@ -90,72 +163,14 @@ scenario: behavior: <<: *base_behavior max_speed: 70 # maximum speed, km/h - tailgate_speed: 111 - overtake_allowed: false - local_planner: - debug_trajectory: true - debug: false - - - name: cav2 - spawn_position: [111.7194, 143.51, 0.3, 0, 0, 0] - destination: [606.87, 145.39, 0] - sensing: - perception: - activate: true - camera: - visualize: 1 # how many camera images need to be visualized. 0 means no visualization for camera - num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) - # relative positions (x,y,z,yaw) of the camera. len(positions) should be equal to camera num - positions: - - [2.5, 0, 1.0, 0] - - [0.0, 0.3, 1.8, 100] - - [0.0, -0.3, 1.8, -100] - - [-2.0, 0.0, 1.5, 180] - lidar: # lidar sensor configuration, check CARLA sensor reference for more details - visualize: true - v2x: - communication_range: 35 - ms-van3t: true - behavior: - <<: *base_behavior - max_speed: 100 # maximum speed, km/h - tailgate_speed: 111 + tailgate_speed: 80 overtake_allowed: true local_planner: debug_trajectory: true debug: false - - name: cav3 - spawn_position: [101.7194, 139.51, 0.3, 0, 0, 0] - destination: [606.87, 145.39, 0] - sensing: - perception: - activate: true - camera: - visualize: 1 # how many camera images need to be visualized. 0 means no visualization for camera - num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) - # relative positions (x,y,z,yaw) of the camera. 
len(positions) should be equal to camera num - positions: - - [2.5, 0, 1.0, 0] - - [0.0, 0.3, 1.8, 100] - - [0.0, -0.3, 1.8, -100] - - [-2.0, 0.0, 1.5, 180] - lidar: # lidar sensor configuration, check CARLA sensor reference for more details - visualize: true - v2x: - communication_range: 35 - ms-van3t: true - behavior: - <<: *base_behavior - max_speed: 100 # maximum speed, km/h - tailgate_speed: 111 - overtake_allowed: true - local_planner: - debug_trajectory: true - debug: false - - - name: cav4 - spawn_position: [91.7194, 146.51, 0.3, 0, 0, 0] + - name: cav2 + spawn_position: [-87.623032, 12.967159, 0.3, 0, 180, 0] destination: [606.87, 145.39, 0] sensing: perception: @@ -170,7 +185,7 @@ scenario: - [0.0, -0.3, 1.8, -100] - [-2.0, 0.0, 1.5, 180] lidar: # lidar sensor configuration, check CARLA sensor reference for more details - visualize: true + visualize: false v2x: communication_range: 35 ms-van3t: true @@ -182,3 +197,61 @@ scenario: local_planner: debug_trajectory: true debug: false +## +# - name: cav3 +# spawn_position: [101.7194, 139.51, 0.3, 0, 0, 0] +# destination: [606.87, 145.39, 0] +# sensing: +# perception: +# activate: true +# camera: +# visualize: 1 # how many camera images need to be visualized. 0 means no visualization for camera +# num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) +# # relative positions (x,y,z,yaw) of the camera. len(positions) should be equal to camera num +# positions: +# - [2.5, 0, 1.0, 0] +# - [0.0, 0.3, 1.8, 100] +# - [0.0, -0.3, 1.8, -100] +# - [-2.0, 0.0, 1.5, 180] +# lidar: # lidar sensor configuration, check CARLA sensor reference for more details +# visualize: true +# v2x: +# communication_range: 35 +# ms-van3t: true +# behavior: +# <<: *base_behavior +# max_speed: 100 # maximum speed, km/h +# tailgate_speed: 111 +# overtake_allowed: true +# local_planner: +# debug_trajectory: true +# debug: false +# +# - name: cav4 +# spawn_position: [91.7194, 146.51, 0.3, 0, 0, 0] +# destination: [606.87, 145.39, 0] +# sensing: +# perception: +# activate: true +# camera: +# visualize: 1 # how many camera images need to be visualized. 0 means no visualization for camera +# num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) +# # relative positions (x,y,z,yaw) of the camera. 
len(positions) should be equal to camera num +# positions: +# - [2.5, 0, 1.0, 0] +# - [0.0, 0.3, 1.8, 100] +# - [0.0, -0.3, 1.8, -100] +# - [-2.0, 0.0, 1.5, 180] +# lidar: # lidar sensor configuration, check CARLA sensor reference for more details +# visualize: true +# v2x: +# communication_range: 35 +# ms-van3t: true +# behavior: +# <<: *base_behavior +# max_speed: 100 # maximum speed, km/h +# tailgate_speed: 111 +# overtake_allowed: true +# local_planner: +# debug_trajectory: true +# debug: false diff --git a/opencda/scenario_testing/config_yaml/pedestrian_test.yaml b/opencda/scenario_testing/config_yaml/pedestrian_test.yaml new file mode 100644 index 000000000..146aa0c58 --- /dev/null +++ b/opencda/scenario_testing/config_yaml/pedestrian_test.yaml @@ -0,0 +1,246 @@ +# Spawn background vehicles by spawn position +#Transform(Location(x=-87.623032, y=12.967159, z=0.600000), Rotation(pitch=0.000000, yaw=-179.840790, roll=0.000000)) +#Transform(Location(x=-67.254570, y=27.963758, z=0.600000), Rotation(pitch=0.000000, yaw=0.159198, roll=0.000000)) + +carla_traffic_manager: + global_distance: 4.0 + global_speed_perc: -50 + ignore_lights_percentage: 0 + vehicle_list: + - spawn_position: + - -41.825108 + - -6.604221 + - 0.6 + - 0 + - -90.161217 + - 0 + vehicle_speed_perc: -200 + blueprint: motorcycle + - spawn_position: + - -87.623032 + - 12.967159 + - 0.3 + - 0 + - -179.840790 + - 0 + vehicle_speed_perc: -200 + blueprint: bicycle + - spawn_position: + - -67.254570 + - 27.963758 + - 0.3 + - 0 + - 0 + - 0 + vehicle_speed_perc: -200 + blueprint: Default + + + pedestrian_list: + - spawn_position: + - -36.741623 + - -76.261688 + - 0.177637 + - spawn_position: + - -49.452744 + - -77.609886 + - 0.177637 + - spawn_position: + - -35.085068 + - -18.718769 + - 0.177637 + - spawn_position: + - -65.538940 + - 7.304374 + - 0.177637 + - spawn_position: + - -32.596573 + - -35.497936 + - 0.177637 + - spawn_position: #ok + - -32.952545 + - -3.692704 + - 0.177637 + - spawn_position: + - -75.401207 + - 5.655548 + - 0.177637 + - spawn_position: + - -31.573521 + - -45.152374 + - 0.177637 + - spawn_position: + - -21.175884 + - -76.382408 + - 0.177637 + - + # if we do not specify, radius is around cav1 + pedestrian_by_radius: true + spawn_list_by_radius: + - n_pedestrian: 4 + radius: 10 + center: [-64,35] + same_destination: true + +vehicle_base: + sensing: + perception: + activate: false # when not activated, objects positions will be retrieved from server directly + camera: + visualize: 4 # how many camera images need to be visualized. 0 means no visualization for camera + num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) + # relative positions (x,y,z,yaw) of the camera. 
len(positions) should be equal to camera num + positions: + - [ 2.5, 0, 1.0, 0 ] + - [ 0.0, 0.3, 1.8, 100 ] + - [ 0.0, -0.3, 1.8, -100 ] + - [ -2.0, 0.0, 1.5, 180 ] + lidar: # lidar sensor configuration, check CARLA sensor reference for more details + visualize: true + + localization: &base_localize + activate: false # when not activated, ego position will be retrieved from server directly + gnss: # gnss sensor configuration + heading_direction_stddev: 0.1 # degree + speed_stddev: 0.2 + debug_helper: + show_animation: false # whether to show real-time trajectory plotting + x_scale: 10.0 # used to multiply with the x coordinate to make the error on x axis clearer + y_scale: 10.0 # used to multiply with the y coordinate to make the error on y axis clearer + map_manager: + visualize: false + activate: false + behavior: &base_behavior + max_speed: 70 # maximum speed, km/h + tailgate_speed: 80 # when a vehicles needs to be close to another vehicle asap + overtake_allowed: true # whether overtake allowed, typically false for platoon leader + collision_time_ahead: 1.1 # used for collision checking + +platoon_base: + max_capacity: 10 + inter_gap: 0.2 # desired time gap + open_gap: 1.0 # open gap + warm_up_speed: 30 # required speed before cooperative merging + + +scenario: + town: + name: Town10HD + background_traffic: + vehicle_num: 20 # Spawn background vehicles by number (spawn position is randomly generated) + single_cav_list: # this is for merging vehicle or single cav without v2x + - name: cav1 + spawn_position: [-64.644844, 24.471010, 0.6, 0, 0.159198, 0] # Transform(Location(x=-64.644844, y=24.471010, z=0.600000), Rotation(pitch=0.000000, yaw=0.0, roll=0.000000)) + destination: [606.87, 145.39, 0] + sensing: + perception: + activate: true + camera: + visualize: 4 # how many camera images need to be visualized. 0 means no visualization for camera + num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) + # relative positions (x,y,z,yaw) of the camera. len(positions) should be equal to camera num + positions: + - [2.5, 0, 1.0, 0] + - [0.0, 0.3, 1.8, 100] + - [0.0, -0.3, 1.8, -100] + - [-2.0, 0.0, 1.5, 180] + lidar: # lidar sensor configuration, check CARLA sensor reference for more details + visualize: true + v2x: + communication_range: 35 + ms-van3t: false # whether to use ms-van3t for communication + behavior: + <<: *base_behavior + max_speed: 70 # maximum speed, km/h + tailgate_speed: 111 + overtake_allowed: false + local_planner: + debug_trajectory: true + debug: false + +# - name: cav2 +# spawn_position: [-87.623032, 12.967159, 0.3, 0, 180, 0] +# destination: [606.87, 145.39, 0] +# sensing: +# perception: +# activate: true +# camera: +# visualize: 1 # how many camera images need to be visualized. 0 means no visualization for camera +# num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) +# # relative positions (x,y,z,yaw) of the camera. 
len(positions) should be equal to camera num +# positions: +# - [2.5, 0, 1.0, 0] +# - [0.0, 0.3, 1.8, 100] +# - [0.0, -0.3, 1.8, -100] +# - [-2.0, 0.0, 1.5, 180] +# lidar: # lidar sensor configuration, check CARLA sensor reference for more details +# visualize: false +# v2x: +# communication_range: 35 +# ms-van3t: true +# behavior: +# <<: *base_behavior +# max_speed: 100 # maximum speed, km/h +# tailgate_speed: 111 +# overtake_allowed: true +# local_planner: +# debug_trajectory: true +# debug: false +## +# - name: cav3 +# spawn_position: [101.7194, 139.51, 0.3, 0, 0, 0] +# destination: [606.87, 145.39, 0] +# sensing: +# perception: +# activate: true +# camera: +# visualize: 1 # how many camera images need to be visualized. 0 means no visualization for camera +# num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) +# # relative positions (x,y,z,yaw) of the camera. len(positions) should be equal to camera num +# positions: +# - [2.5, 0, 1.0, 0] +# - [0.0, 0.3, 1.8, 100] +# - [0.0, -0.3, 1.8, -100] +# - [-2.0, 0.0, 1.5, 180] +# lidar: # lidar sensor configuration, check CARLA sensor reference for more details +# visualize: true +# v2x: +# communication_range: 35 +# ms-van3t: true +# behavior: +# <<: *base_behavior +# max_speed: 100 # maximum speed, km/h +# tailgate_speed: 111 +# overtake_allowed: true +# local_planner: +# debug_trajectory: true +# debug: false +# +# - name: cav4 +# spawn_position: [91.7194, 146.51, 0.3, 0, 0, 0] +# destination: [606.87, 145.39, 0] +# sensing: +# perception: +# activate: true +# camera: +# visualize: 1 # how many camera images need to be visualized. 0 means no visualization for camera +# num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) +# # relative positions (x,y,z,yaw) of the camera. 
len(positions) should be equal to camera num +# positions: +# - [2.5, 0, 1.0, 0] +# - [0.0, 0.3, 1.8, 100] +# - [0.0, -0.3, 1.8, -100] +# - [-2.0, 0.0, 1.5, 180] +# lidar: # lidar sensor configuration, check CARLA sensor reference for more details +# visualize: true +# v2x: +# communication_range: 35 +# ms-van3t: true +# behavior: +# <<: *base_behavior +# max_speed: 100 # maximum speed, km/h +# tailgate_speed: 111 +# overtake_allowed: true +# local_planner: +# debug_trajectory: true +# debug: false diff --git a/opencda/scenario_testing/ms_van3t_example.py b/opencda/scenario_testing/ms_van3t_example.py index 8680a1436..9d4efa77b 100644 --- a/opencda/scenario_testing/ms_van3t_example.py +++ b/opencda/scenario_testing/ms_van3t_example.py @@ -30,9 +30,7 @@ def run_scenario(opt, scenario_params): opt.apply_ml, opt.version, town=town, - cav_world=cav_world, - carla_host=opt.host, - carla_port=opt.port) + cav_world=cav_world) single_cav_list = \ scenario_manager.create_vehicle_manager(application=['single']) @@ -70,10 +68,6 @@ def run_scenario(opt, scenario_params): control = single_cav.run_step() single_cav.vehicle.apply_control(control) - for actor in scenario_manager.world.get_actors().filter("*vehicle*"): - location = actor.get_location() - scenario_manager.world.debug.draw_string(location, str(actor.id), False, carla.Color(200, 200, 0)) - step_event.set() ms_van3t_manager.carla_object.tick_event.wait() ms_van3t_manager.carla_object.tick_event.clear() diff --git a/opencda/scenario_testing/ms_van3t_example_ml.py b/opencda/scenario_testing/ms_van3t_example_ml.py index b839d7b82..d3f44b8e4 100644 --- a/opencda/scenario_testing/ms_van3t_example_ml.py +++ b/opencda/scenario_testing/ms_van3t_example_ml.py @@ -21,6 +21,7 @@ def run_scenario(opt, scenario_params): if 'name' in scenario_params['scenario']['town']: town = scenario_params['scenario']['town']['name'] + print('Loading ', town) else: print('No town name has been specified, please check the yaml file.') raise ValueError @@ -30,15 +31,13 @@ def run_scenario(opt, scenario_params): opt.apply_ml, opt.version, town=town, - cav_world=cav_world, - carla_host=opt.host, - carla_port=opt.port) + cav_world=cav_world) single_cav_list = \ scenario_manager.create_vehicle_manager(application=['single']) - traffic_manager, bg_veh_list = \ - scenario_manager.create_traffic_carla(port=opt.tm_port) + traffic_manager, bg_veh_list, all_actors = \ + scenario_manager.create_traffic_carla() step_event = Event() stop_event = Event() @@ -51,7 +50,7 @@ def run_scenario(opt, scenario_params): stop_event=stop_event) spectator = scenario_manager.world.get_spectator() - spectator_vehicle = single_cav_list[3].vehicle + spectator_vehicle = single_cav_list[0].vehicle transform = spectator_vehicle.get_transform() spectator.set_transform(carla.Transform(transform.location + carla.Location(z=60), @@ -67,21 +66,21 @@ def run_scenario(opt, scenario_params): for i, single_cav in enumerate(single_cav_list): single_cav.update_info_LDM() - control = single_cav.run_step() - single_cav.vehicle.apply_control(control) - - for actor in scenario_manager.world.get_actors().filter("*vehicle*"): - location = actor.get_location() - scenario_manager.world.debug.draw_string(location, str(actor.id), False, carla.Color(200, 200, 0)) + # control = single_cav.run_step() + # single_cav.vehicle.apply_control(control) step_event.set() ms_van3t_manager.carla_object.tick_event.wait() ms_van3t_manager.carla_object.tick_event.clear() + finally: stop_event.set() # stop the co-simulation step_event.set() # 
stop the co-simulation scenario_manager.close() print("Simulation finished.") + for i in range(0, len(all_actors), 2): + all_actors[i].stop() + for v in single_cav_list: v.destroy() diff --git a/opencda/scenario_testing/ms_van3t_example_ml_auto.py b/opencda/scenario_testing/pedestrian_test.py similarity index 63% rename from opencda/scenario_testing/ms_van3t_example_ml_auto.py rename to opencda/scenario_testing/pedestrian_test.py index 2b9995aa8..06f6329fa 100644 --- a/opencda/scenario_testing/ms_van3t_example_ml_auto.py +++ b/opencda/scenario_testing/pedestrian_test.py @@ -25,64 +25,62 @@ def run_scenario(opt, scenario_params): print('No town name has been specified, please check the yaml file.') raise ValueError - # create co-simulation scenario manager + # create scenario manager scenario_manager = sim_api.ScenarioManager(scenario_params, opt.apply_ml, opt.version, town=town, - cav_world=cav_world, carla_host=opt.host, - carla_port=opt.port) + carla_port=opt.port, + cav_world=cav_world) + # create CAVs (vehicles equipped with sensors) single_cav_list = \ - scenario_manager.create_vehicle_manager_auto(application=['single'], - port=opt.port) - - traffic_manager, bg_veh_list = \ - scenario_manager.create_traffic_carla(port=opt.tm_port) - - step_event = Event() - stop_event = Event() - ms_van3t_manager = \ - MsVan3tCoScenarioManager(scenario_params, - scenario_manager, - single_cav_list, - traffic_manager, - step_event=step_event, - stop_event=stop_event) + scenario_manager.create_vehicle_manager(application=['single']) + # create background traffic in carla + traffic_manager, bg_veh_list, all_actors = \ + scenario_manager.create_traffic_carla() + + # get spectactor spectator = scenario_manager.world.get_spectator() - spectator_vehicle = single_cav_list[3].vehicle + # assign the first CAV as the spectator vehicle + spectator_vehicle = single_cav_list[0].vehicle + # get the transform of the spectator vehicle transform = spectator_vehicle.get_transform() + # set the spectator to the top of the spectator vehicle spectator.set_transform(carla.Transform(transform.location + carla.Location(z=60), carla.Rotation(pitch=-90))) scenario_manager.tick() - while True: + location = single_cav_list[0].vehicle.get_location() + print("CAV spawned location:", location) + # Simulation loop + while True: + scenario_manager.tick() + # update spectator position transform = spectator_vehicle.get_transform() spectator.set_transform(carla.Transform(transform.location + carla.Location(z=60), carla.Rotation(pitch=-90))) for i, single_cav in enumerate(single_cav_list): + # update perception and localization info for CAV single_cav.update_info_LDM() + # Vehicle manager set with autopilot --> no control needed # control = single_cav.run_step() # single_cav.vehicle.apply_control(control) - for actor in scenario_manager.world.get_actors().filter("*vehicle*"): - location = actor.get_location() - scenario_manager.world.debug.draw_string(location, str(actor.id), False, carla.Color(200, 200, 0)) + finally: + print("Simulation finished.") + # for v in single_cav_list: + # v.destroy() + for i in range(0, len(all_actors), 2): + all_actors[i].stop() - step_event.set() - ms_van3t_manager.carla_object.tick_event.wait() - ms_van3t_manager.carla_object.tick_event.clear() + print(f'\nDestroying {int(len(all_actors)/2)} walkers and {int(len(single_cav_list))} CAVs') + scenario_manager.destroyActors() - finally: - stop_event.set() # stop the co-simulation - step_event.set() # stop the co-simulation scenario_manager.close() - 
print("Simulation finished.") - for v in single_cav_list: - v.destroy() diff --git a/opencda/scenario_testing/utils/ms_van3t_cosim_api.py b/opencda/scenario_testing/utils/ms_van3t_cosim_api.py index aa7b384b4..7ead85720 100644 --- a/opencda/scenario_testing/utils/ms_van3t_cosim_api.py +++ b/opencda/scenario_testing/utils/ms_van3t_cosim_api.py @@ -88,13 +88,77 @@ def ExecuteOneTimeStep(self, request, context): self.steps += 1 return carla_pb2.Boolean(value=True) - def GetManagedActorsIds(self, request, context): + def GetManagedActorsIds(self, request, context): # return only vehicle id # GetManagedHostIds() self.world.get_actors() actor_list = self.world.get_actors() vehicles = actor_list.filter('vehicle.*') returnValue = carla_pb2.ActorIds() for vehicle in vehicles: - returnValue.actorId.append(vehicle.id) + if not ('bicycle' in vehicle.type_id or 'motorcycle' in vehicle.type_id): + returnValue.actorId.append(vehicle.id) + return returnValue + + def GetAllActorsIds(self, request, context): # return vehicle and pedestrian id + self.world.get_actors() + actor_list = self.world.get_actors() + returnValue = carla_pb2.ActorIds() + for actor in actor_list: + if 'vehicle' in actor.type_id or 'pedestrian' in actor.type_id: + returnValue.actorId.append(actor.id) + return returnValue + + def GetActorById(self, request, context): + actor_list = self.world.get_actors() + actor = actor_list.find(request.num) + aLocation = actor.get_location() + aSpeed = actor.get_velocity() + aAcceleration = actor.get_acceleration() + returnValue = carla_pb2.Actor() + returnValue.id = request.num + returnValue.location.x = aLocation.x + returnValue.location.y = aLocation.y + returnValue.location.z = aLocation.z + returnValue.speed.x = aSpeed.x + returnValue.speed.y = aSpeed.y + returnValue.speed.z = aSpeed.z + + geoPos = self.world.get_map().transform_to_geolocation(aLocation) + returnValue.latitude = geoPos.latitude + returnValue.longitude = geoPos.longitude + returnValue.length = actor.bounding_box.extent.x * 2 + returnValue.width = actor.bounding_box.extent.y * 2 + + if 'vehicle' in actor.type_id: + returnValue.lane = abs(self.world.get_map().get_waypoint(aLocation).lane_id) + returnValue.heading = actor.get_transform().rotation.yaw + returnValue.transform.location.x = actor.get_transform().location.x + returnValue.transform.location.y = actor.get_transform().location.y + returnValue.transform.location.z = actor.get_transform().location.z + returnValue.transform.rotation.pitch = actor.get_transform().rotation.pitch + returnValue.transform.rotation.yaw = actor.get_transform().rotation.yaw + returnValue.transform.rotation.roll = actor.get_transform().rotation.roll + if 'bike' in actor.type_id: + returnValue.itsType = 1 + elif 'harley' in actor.type_id: + returnValue.itsType = 3 + else: + returnValue.itsType = 2 + + elif 'pedestrian' in actor.type_id: + returnValue.lane = 0 + returnValue.heading = actor.get_transform().rotation.yaw + if actor.get_transform().rotation.yaw < 0: + returnValue.heading += 360 + elif actor.get_transform().rotation.yaw > 360: + returnValue.heading -= 360 + returnValue.transform.location.x = actor.get_transform().location.x + returnValue.transform.location.y = actor.get_transform().location.y + returnValue.transform.location.z = actor.get_transform().location.z + returnValue.transform.rotation.pitch = 0 + returnValue.transform.rotation.yaw = returnValue.heading + returnValue.transform.rotation.roll = 0 + returnValue.itsType = 0 + return returnValue def GetManagedActorById(self, request, 
context): @@ -123,8 +187,16 @@ def GetManagedActorById(self, request, context): returnValue.width = actor.bounding_box.extent.y * 2 # print("bounding box: " + str(actor.bounding_box.extent.x) + ", " + str(actor.bounding_box.extent.y)) # I take abs of lane_id https://github.com/carla-simulator/carla/issues/1469 - returnValue.lane = abs(self.world.get_map().get_waypoint(aLocation).lane_id) - returnValue.heading = actor.get_transform().rotation.yaw + if 'vehicle' in actor.type_id: + returnValue.lane = abs(self.world.get_map().get_waypoint(aLocation).lane_id) + returnValue.heading = actor.get_transform().rotation.yaw + elif 'pedestrian' in actor.type_id: + returnValue.lane = 0 + returnValue.heading = actor.get_transform().rotation.yaw + if actor.get_transform().rotation.yaw < 0: + returnValue.heading += 360 + elif actor.get_transform().rotation.yaw > 360: + returnValue.heading -= 360 # This is redundant, but for CARLA compliance returnValue.transform.location.x = actor.get_transform().location.x returnValue.transform.location.y = actor.get_transform().location.y @@ -284,7 +356,8 @@ def GetActorLDM(self, request, context): ego_pos, ego_spd, objects = cav.getInfo() returnValue = carla_pb2.Objects() for PO in cav.LDM.getAllPOs(): - if (not PO.connected and PO.tracked and PO.onSight) or PO.CPM: + # if (PO.detected and PO.onSight) or PO.CPM or PO.VAM: + if (PO.detected and PO.tracked and PO.onSight) or PO.CPM or PO.VAM: object = carla_pb2.Object() object.id = PO.id if PO.CPM and len(PO.perceivedBy) == 0: @@ -294,34 +367,62 @@ def GetActorLDM(self, request, context): object.speed.x = PO.perception.xSpeed object.speed.y = PO.perception.ySpeed object.speed.z = 0 + if 70 < PO.id < 80: + print(f"Actor {PO.id} - speed x = {PO.perception.xSpeed}, y = {PO.perception.ySpeed}") object.acceleration.x = PO.perception.xacc object.acceleration.y = PO.perception.yacc object.acceleration.z = 0 - object.width = PO.perception.width object.length = PO.perception.length + object.width = PO.perception.width object.onSight = PO.onSight object.tracked = PO.tracked object.timestamp = int(1000 * (PO.getLatestPoint().timestamp - self.time_offset)) object.confidence = PO.perception.confidence - if PO.perception.yaw < 0: - object.yaw = PO.perception.yaw + 360 - else: - object.yaw = PO.perception.yaw - - curr_wpt = self.world.get_map().get_waypoint(carla.Location(x=PO.perception.xPosition, y=PO.perception.yPosition, z=0.0)) + # if PO.perception.yaw < 0: + # object.yaw = PO.perception.yaw + 360 + # else: + # object.yaw = PO.perception.yaw + # get_waypoint DOES NOT WORK FOR PEDESTRIANS + if any(x in PO.perception.itsType for x in ('vehicle', 'bicycle', 'motorcycle')): + curr_wpt = self.world.get_map().get_waypoint(carla.Location(x=PO.perception.xPosition, y=PO.perception.yPosition, z=0.0)) + object.yaw = curr_wpt.transform.rotation.yaw + elif PO.perception.itsType == 'pedestrian': + try: + curr_wpt = self.world.get_actors().find(object.id).get_transform() + object.yaw = curr_wpt.rotation.yaw + except AttributeError: + print(f"Error: Actor with ID {object.id} not found in the CARLA world.") + continue + curr_wpt = None # Or assign a default value, raise another exception, etc. 
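Editor's note: for walkers the map's get_waypoint() cannot supply a road-aligned heading, so the patch takes the yaw from the actor transform and shifts negative values into [0, 360). A minimal sketch of that wrap, assuming degrees (the helper name wrap_heading is illustrative, not part of the patch):

def wrap_heading(yaw_deg):
    """Map a CARLA yaw in degrees (typically in (-180, 180]) to [0, 360)."""
    return yaw_deg % 360.0

assert wrap_heading(-90.0) == 270.0   # e.g. a walker facing -y
assert wrap_heading(370.0) == 10.0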
+ object.yaw = 0 # TODO: this is a workaround, the yaw should be the one from the perception, but pose estimation is not reliable - object.yaw = curr_wpt.transform.rotation.yaw + if object.yaw < 0: + object.yaw += 360 + if object.yaw > 360: + object.yaw -= 360 object.transform.location.x = PO.perception.xPosition object.transform.location.y = PO.perception.yPosition object.transform.location.z = 0 object.transform.rotation.pitch = 0 object.transform.rotation.yaw = PO.perception.yaw object.transform.rotation.roll = 0 - object.detected = not PO.connected + object.detected = PO.detected if len(PO.perceivedBy) > 0: object.perceivedBy = PO.perceivedBy[0] else: object.perceivedBy = -1 + if PO.perception.itsType == 'vehicle': + object.itsType = 2 + elif PO.perception.itsType == 'pedestrian': + object.itsType = 0 + elif PO.perception.itsType == 'bicycle': + object.itsType = 1 + elif PO.perception.itsType == 'motorcycle': + object.itsType = 3 + elif PO.perception.itsType == 'unknown': + object.itsType = 4 + object.VAM = PO.VAM + returnValue.objects.append(object) return returnValue @@ -430,11 +531,20 @@ def InsertObject(self, request, context): newPO.heading = request.object.yaw newPO.yaw = request.object.yaw newPO.id = request.object.id + if (request.object.itsType == 2): + newPO.itsType = "vehicle" + if (request.object.itsType == 0): + newPO.itsType = "pedestrian" + if (request.object.itsType == 1): + newPO.itsType = "bicycle" + if (request.object.itsType == 3): + newPO.itsType = "motorcycle" toInsert = [newPO] cav.ldm_mutex.acquire() - if request.object.detected: + if request.object.VAM: + cav.LDM.VAMfusion(newPO) + elif request.object.detected: cav.LDM.CPMfusion(toInsert, request.fromId) - else: cav.LDM.CAMfusion(newPO) cav.ldm_mutex.release() diff --git a/opencda/scenario_testing/utils/proto/carla_pb2.py b/opencda/scenario_testing/utils/proto/carla_pb2.py index 3e93a461a..dd9b18351 100644 --- a/opencda/scenario_testing/utils/proto/carla_pb2.py +++ b/opencda/scenario_testing/utils/proto/carla_pb2.py @@ -15,7 +15,7 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0b\x63\x61rla.proto\x12\x05\x63\x61rla\x1a\x1bgoogle/protobuf/empty.proto\"\x1b\n\x08\x41\x63torIds\x12\x0f\n\x07\x61\x63torId\x18\x01 \x03(\x05\"\x15\n\x06Number\x12\x0b\n\x03num\x18\x01 \x01(\x05\"\x81\x02\n\x07Vehicle\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x1c\n\x05speed\x18\x02 \x01(\x0b\x32\r.carla.Vector\x12#\n\x0c\x61\x63\x63\x65leration\x18\x03 \x01(\x0b\x32\r.carla.Vector\x12\x1f\n\x08location\x18\x04 \x01(\x0b\x32\r.carla.Vector\x12\x10\n\x08latitude\x18\x05 \x01(\x01\x12\x11\n\tlongitude\x18\x06 \x01(\x01\x12\x0e\n\x06length\x18\x07 \x01(\x01\x12\r\n\x05width\x18\x08 \x01(\x01\x12\x0c\n\x04lane\x18\t \x01(\x05\x12\x0f\n\x07heading\x18\n \x01(\x01\x12#\n\ttransform\x18\x0b \x01(\x0b\x32\x10.carla.Transform\")\n\x06Vector\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x12\t\n\x01z\x18\x03 \x01(\x01\"4\n\x08Rotation\x12\r\n\x05pitch\x18\x01 \x01(\x01\x12\x0b\n\x03yaw\x18\x02 \x01(\x01\x12\x0c\n\x04roll\x18\x03 \x01(\x01\"O\n\tTransform\x12\x1f\n\x08location\x18\x01 \x01(\x0b\x32\r.carla.Vector\x12!\n\x08rotation\x18\x02 \x01(\x0b\x32\x0f.carla.Rotation\"\xb0\x02\n\x06Object\x12\n\n\x02id\x18\x01 \x01(\x05\x12\n\n\x02\x64x\x18\x02 \x01(\x01\x12\n\n\x02\x64y\x18\x03 \x01(\x01\x12\x1c\n\x05speed\x18\x04 \x01(\x0b\x32\r.carla.Vector\x12#\n\x0c\x61\x63\x63\x65leration\x18\x05 
\x01(\x0b\x32\r.carla.Vector\x12\x0e\n\x06length\x18\x06 \x01(\x01\x12\r\n\x05width\x18\x07 \x01(\x01\x12\x0f\n\x07onSight\x18\x08 \x01(\x08\x12\x0f\n\x07tracked\x18\t \x01(\x08\x12\x11\n\ttimestamp\x18\n \x01(\x05\x12\x12\n\nconfidence\x18\x0b \x01(\x01\x12\x0b\n\x03yaw\x18\x0c \x01(\x01\x12#\n\ttransform\x18\r \x01(\x0b\x32\x10.carla.Transform\x12\x10\n\x08\x64\x65tected\x18\x0e \x01(\x08\x12\x13\n\x0bperceivedBy\x18\x0f \x01(\x05\")\n\x07Objects\x12\x1e\n\x07objects\x18\x01 \x03(\x0b\x32\r.carla.Object\"\x18\n\x07\x42oolean\x12\r\n\x05value\x18\x01 \x01(\x08\"M\n\tObjectsIn\x12!\n\ncpmObjects\x18\x01 \x03(\x0b\x32\r.carla.Object\x12\r\n\x05\x65goId\x18\x02 \x01(\x05\x12\x0e\n\x06\x66romId\x18\x03 \x01(\x05\"H\n\x08ObjectIn\x12\x1d\n\x06object\x18\x01 \x01(\x0b\x32\r.carla.Object\x12\r\n\x05\x65goId\x18\x02 \x01(\x05\x12\x0e\n\x06\x66romId\x18\x03 \x01(\x05\"[\n\x07\x43ontrol\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x1f\n\x08waypoint\x18\x02 \x01(\x0b\x32\r.carla.Vector\x12\r\n\x05speed\x18\x03 \x01(\x01\x12\x14\n\x0c\x61\x63\x63\x65leration\x18\x04 \x01(\x01\"\xd7\x01\n\x08Waypoint\x12\x1f\n\x08location\x18\x02 \x01(\x0b\x32\r.carla.Vector\x12!\n\x08rotation\x18\x03 \x01(\x0b\x32\x0f.carla.Rotation\x12\x0f\n\x07road_id\x18\x04 \x01(\x05\x12\x12\n\nsection_id\x18\x05 \x01(\x05\x12\x13\n\x0bis_junction\x18\x06 \x01(\x08\x12\x13\n\x0bjunction_id\x18\x07 \x01(\x05\x12\x0f\n\x07lane_id\x18\x08 \x01(\x05\x12\x12\n\nlane_width\x18\t \x01(\x01\x12\x13\n\x0blane_change\x18\n \x01(\x05\"_\n\rObjectMinimal\x12\n\n\x02id\x18\x01 \x01(\x05\x12#\n\ttransform\x18\x02 \x01(\x0b\x32\x10.carla.Transform\x12\x0e\n\x06length\x18\x03 \x01(\x01\x12\r\n\x05width\x18\x04 \x01(\x01\"\x1c\n\x0b\x44oubleValue\x12\r\n\x05value\x18\x01 \x01(\x01\x32\xcd\x07\n\x0c\x43\x61rlaAdapter\x12<\n\x12\x45xecuteOneTimeStep\x12\x16.google.protobuf.Empty\x1a\x0e.carla.Boolean\x12\x38\n\x06\x46inish\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12>\n\x13GetManagedActorsIds\x12\x16.google.protobuf.Empty\x1a\x0f.carla.ActorIds\x12<\n\x11GetManagedCAVsIds\x12\x16.google.protobuf.Empty\x1a\x0f.carla.ActorIds\x12\x34\n\x13GetManagedActorById\x12\r.carla.Number\x1a\x0e.carla.Vehicle\x12.\n\rInsertVehicle\x12\x0e.carla.Vehicle\x1a\r.carla.Number\x12?\n\x13GetRandomSpawnPoint\x12\x16.google.protobuf.Empty\x1a\x10.carla.Transform\x12,\n\x0bGetActorLDM\x12\r.carla.Number\x1a\x0e.carla.Objects\x12.\n\x0cInsertObject\x12\x0f.carla.ObjectIn\x1a\r.carla.Number\x12\x35\n\rInsertObjects\x12\x10.carla.ObjectsIn\x1a\x12.carla.DoubleValue\x12/\n\x08InsertCV\x12\x0f.carla.ObjectIn\x1a\x12.carla.DoubleValue\x12,\n\x0cGetCartesian\x12\r.carla.Vector\x1a\r.carla.Vector\x12&\n\x06GetGeo\x12\r.carla.Vector\x1a\r.carla.Vector\x12\'\n\x06hasLDM\x12\r.carla.Number\x1a\x0e.carla.Boolean\x12\x34\n\nSetControl\x12\x0e.carla.Control\x1a\x16.google.protobuf.Empty\x12\x32\n\x10GetCarlaWaypoint\x12\r.carla.Vector\x1a\x0f.carla.Waypoint\x12\x36\n\x14GetNextCarlaWaypoint\x12\r.carla.Vector\x1a\x0f.carla.Waypoint\x12\x39\n\rGetGTaccuracy\x12\x14.carla.ObjectMinimal\x1a\x12.carla.DoubleValueb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0b\x63\x61rla.proto\x12\x05\x63\x61rla\x1a\x1bgoogle/protobuf/empty.proto\"\x1b\n\x08\x41\x63torIds\x12\x0f\n\x07\x61\x63torId\x18\x01 \x03(\x05\"\x15\n\x06Number\x12\x0b\n\x03num\x18\x01 \x01(\x05\"\xeb\x01\n\x05\x41\x63tor\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x1f\n\x08location\x18\x02 \x01(\x0b\x32\r.carla.Vector\x12\x1c\n\x05speed\x18\x03 
\x01(\x0b\x32\r.carla.Vector\x12\x10\n\x08latitude\x18\x04 \x01(\x01\x12\x11\n\tlongitude\x18\x05 \x01(\x01\x12\x0e\n\x06length\x18\x06 \x01(\x01\x12\r\n\x05width\x18\x07 \x01(\x01\x12\x0c\n\x04lane\x18\x08 \x01(\x05\x12\x0f\n\x07heading\x18\t \x01(\x01\x12#\n\ttransform\x18\n \x01(\x0b\x32\x10.carla.Transform\x12\x0f\n\x07itsType\x18\x0b \x01(\x05\"\x81\x02\n\x07Vehicle\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x1c\n\x05speed\x18\x02 \x01(\x0b\x32\r.carla.Vector\x12#\n\x0c\x61\x63\x63\x65leration\x18\x03 \x01(\x0b\x32\r.carla.Vector\x12\x1f\n\x08location\x18\x04 \x01(\x0b\x32\r.carla.Vector\x12\x10\n\x08latitude\x18\x05 \x01(\x01\x12\x11\n\tlongitude\x18\x06 \x01(\x01\x12\x0e\n\x06length\x18\x07 \x01(\x01\x12\r\n\x05width\x18\x08 \x01(\x01\x12\x0c\n\x04lane\x18\t \x01(\x05\x12\x0f\n\x07heading\x18\n \x01(\x01\x12#\n\ttransform\x18\x0b \x01(\x0b\x32\x10.carla.Transform\")\n\x06Vector\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x12\t\n\x01z\x18\x03 \x01(\x01\"4\n\x08Rotation\x12\r\n\x05pitch\x18\x01 \x01(\x01\x12\x0b\n\x03yaw\x18\x02 \x01(\x01\x12\x0c\n\x04roll\x18\x03 \x01(\x01\"O\n\tTransform\x12\x1f\n\x08location\x18\x01 \x01(\x0b\x32\r.carla.Vector\x12!\n\x08rotation\x18\x02 \x01(\x0b\x32\x0f.carla.Rotation\"\xce\x02\n\x06Object\x12\n\n\x02id\x18\x01 \x01(\x05\x12\n\n\x02\x64x\x18\x02 \x01(\x01\x12\n\n\x02\x64y\x18\x03 \x01(\x01\x12\x1c\n\x05speed\x18\x04 \x01(\x0b\x32\r.carla.Vector\x12#\n\x0c\x61\x63\x63\x65leration\x18\x05 \x01(\x0b\x32\r.carla.Vector\x12\x0e\n\x06length\x18\x06 \x01(\x01\x12\r\n\x05width\x18\x07 \x01(\x01\x12\x0f\n\x07onSight\x18\x08 \x01(\x08\x12\x0f\n\x07tracked\x18\t \x01(\x08\x12\x11\n\ttimestamp\x18\n \x01(\x05\x12\x12\n\nconfidence\x18\x0b \x01(\x01\x12\x0b\n\x03yaw\x18\x0c \x01(\x01\x12#\n\ttransform\x18\r \x01(\x0b\x32\x10.carla.Transform\x12\x10\n\x08\x64\x65tected\x18\x0e \x01(\x08\x12\x13\n\x0bperceivedBy\x18\x0f \x01(\x05\x12\x0f\n\x07itsType\x18\x10 \x01(\x05\x12\x0b\n\x03VAM\x18\x11 \x01(\x08\")\n\x07Objects\x12\x1e\n\x07objects\x18\x01 \x03(\x0b\x32\r.carla.Object\"\x18\n\x07\x42oolean\x12\r\n\x05value\x18\x01 \x01(\x08\"M\n\tObjectsIn\x12!\n\ncpmObjects\x18\x01 \x03(\x0b\x32\r.carla.Object\x12\r\n\x05\x65goId\x18\x02 \x01(\x05\x12\x0e\n\x06\x66romId\x18\x03 \x01(\x05\"H\n\x08ObjectIn\x12\x1d\n\x06object\x18\x01 \x01(\x0b\x32\r.carla.Object\x12\r\n\x05\x65goId\x18\x02 \x01(\x05\x12\x0e\n\x06\x66romId\x18\x03 \x01(\x05\"[\n\x07\x43ontrol\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x1f\n\x08waypoint\x18\x02 \x01(\x0b\x32\r.carla.Vector\x12\r\n\x05speed\x18\x03 \x01(\x01\x12\x14\n\x0c\x61\x63\x63\x65leration\x18\x04 \x01(\x01\"\xd7\x01\n\x08Waypoint\x12\x1f\n\x08location\x18\x02 \x01(\x0b\x32\r.carla.Vector\x12!\n\x08rotation\x18\x03 \x01(\x0b\x32\x0f.carla.Rotation\x12\x0f\n\x07road_id\x18\x04 \x01(\x05\x12\x12\n\nsection_id\x18\x05 \x01(\x05\x12\x13\n\x0bis_junction\x18\x06 \x01(\x08\x12\x13\n\x0bjunction_id\x18\x07 \x01(\x05\x12\x0f\n\x07lane_id\x18\x08 \x01(\x05\x12\x12\n\nlane_width\x18\t \x01(\x01\x12\x13\n\x0blane_change\x18\n \x01(\x05\"_\n\rObjectMinimal\x12\n\n\x02id\x18\x01 \x01(\x05\x12#\n\ttransform\x18\x02 \x01(\x0b\x32\x10.carla.Transform\x12\x0e\n\x06length\x18\x03 \x01(\x01\x12\r\n\x05width\x18\x04 \x01(\x01\"\x1c\n\x0b\x44oubleValue\x12\r\n\x05value\x18\x01 
\x01(\x01\x32\xb6\x08\n\x0c\x43\x61rlaAdapter\x12<\n\x12\x45xecuteOneTimeStep\x12\x16.google.protobuf.Empty\x1a\x0e.carla.Boolean\x12\x38\n\x06\x46inish\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12>\n\x13GetManagedActorsIds\x12\x16.google.protobuf.Empty\x1a\x0f.carla.ActorIds\x12<\n\x11GetManagedCAVsIds\x12\x16.google.protobuf.Empty\x1a\x0f.carla.ActorIds\x12\x34\n\x13GetManagedActorById\x12\r.carla.Number\x1a\x0e.carla.Vehicle\x12:\n\x0fGetAllActorsIds\x12\x16.google.protobuf.Empty\x1a\x0f.carla.ActorIds\x12+\n\x0cGetActorById\x12\r.carla.Number\x1a\x0c.carla.Actor\x12.\n\rInsertVehicle\x12\x0e.carla.Vehicle\x1a\r.carla.Number\x12?\n\x13GetRandomSpawnPoint\x12\x16.google.protobuf.Empty\x1a\x10.carla.Transform\x12,\n\x0bGetActorLDM\x12\r.carla.Number\x1a\x0e.carla.Objects\x12.\n\x0cInsertObject\x12\x0f.carla.ObjectIn\x1a\r.carla.Number\x12\x35\n\rInsertObjects\x12\x10.carla.ObjectsIn\x1a\x12.carla.DoubleValue\x12/\n\x08InsertCV\x12\x0f.carla.ObjectIn\x1a\x12.carla.DoubleValue\x12,\n\x0cGetCartesian\x12\r.carla.Vector\x1a\r.carla.Vector\x12&\n\x06GetGeo\x12\r.carla.Vector\x1a\r.carla.Vector\x12\'\n\x06hasLDM\x12\r.carla.Number\x1a\x0e.carla.Boolean\x12\x34\n\nSetControl\x12\x0e.carla.Control\x1a\x16.google.protobuf.Empty\x12\x32\n\x10GetCarlaWaypoint\x12\r.carla.Vector\x1a\x0f.carla.Waypoint\x12\x36\n\x14GetNextCarlaWaypoint\x12\r.carla.Vector\x1a\x0f.carla.Waypoint\x12\x39\n\rGetGTaccuracy\x12\x14.carla.ObjectMinimal\x1a\x12.carla.DoubleValueb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -26,32 +26,34 @@ _globals['_ACTORIDS']._serialized_end=78 _globals['_NUMBER']._serialized_start=80 _globals['_NUMBER']._serialized_end=101 - _globals['_VEHICLE']._serialized_start=104 - _globals['_VEHICLE']._serialized_end=361 - _globals['_VECTOR']._serialized_start=363 - _globals['_VECTOR']._serialized_end=404 - _globals['_ROTATION']._serialized_start=406 - _globals['_ROTATION']._serialized_end=458 - _globals['_TRANSFORM']._serialized_start=460 - _globals['_TRANSFORM']._serialized_end=539 - _globals['_OBJECT']._serialized_start=542 - _globals['_OBJECT']._serialized_end=846 - _globals['_OBJECTS']._serialized_start=848 - _globals['_OBJECTS']._serialized_end=889 - _globals['_BOOLEAN']._serialized_start=891 - _globals['_BOOLEAN']._serialized_end=915 - _globals['_OBJECTSIN']._serialized_start=917 - _globals['_OBJECTSIN']._serialized_end=994 - _globals['_OBJECTIN']._serialized_start=996 - _globals['_OBJECTIN']._serialized_end=1068 - _globals['_CONTROL']._serialized_start=1070 - _globals['_CONTROL']._serialized_end=1161 - _globals['_WAYPOINT']._serialized_start=1164 - _globals['_WAYPOINT']._serialized_end=1379 - _globals['_OBJECTMINIMAL']._serialized_start=1381 - _globals['_OBJECTMINIMAL']._serialized_end=1476 - _globals['_DOUBLEVALUE']._serialized_start=1478 - _globals['_DOUBLEVALUE']._serialized_end=1506 - _globals['_CARLAADAPTER']._serialized_start=1509 - _globals['_CARLAADAPTER']._serialized_end=2482 + _globals['_ACTOR']._serialized_start=104 + _globals['_ACTOR']._serialized_end=339 + _globals['_VEHICLE']._serialized_start=342 + _globals['_VEHICLE']._serialized_end=599 + _globals['_VECTOR']._serialized_start=601 + _globals['_VECTOR']._serialized_end=642 + _globals['_ROTATION']._serialized_start=644 + _globals['_ROTATION']._serialized_end=696 + _globals['_TRANSFORM']._serialized_start=698 + _globals['_TRANSFORM']._serialized_end=777 + _globals['_OBJECT']._serialized_start=780 + _globals['_OBJECT']._serialized_end=1114 + 
_globals['_OBJECTS']._serialized_start=1116 + _globals['_OBJECTS']._serialized_end=1157 + _globals['_BOOLEAN']._serialized_start=1159 + _globals['_BOOLEAN']._serialized_end=1183 + _globals['_OBJECTSIN']._serialized_start=1185 + _globals['_OBJECTSIN']._serialized_end=1262 + _globals['_OBJECTIN']._serialized_start=1264 + _globals['_OBJECTIN']._serialized_end=1336 + _globals['_CONTROL']._serialized_start=1338 + _globals['_CONTROL']._serialized_end=1429 + _globals['_WAYPOINT']._serialized_start=1432 + _globals['_WAYPOINT']._serialized_end=1647 + _globals['_OBJECTMINIMAL']._serialized_start=1649 + _globals['_OBJECTMINIMAL']._serialized_end=1744 + _globals['_DOUBLEVALUE']._serialized_start=1746 + _globals['_DOUBLEVALUE']._serialized_end=1774 + _globals['_CARLAADAPTER']._serialized_start=1777 + _globals['_CARLAADAPTER']._serialized_end=2855 # @@protoc_insertion_point(module_scope) diff --git a/opencda/scenario_testing/utils/proto/carla_pb2_grpc.py b/opencda/scenario_testing/utils/proto/carla_pb2_grpc.py index 5108804d0..95c2ee862 100644 --- a/opencda/scenario_testing/utils/proto/carla_pb2_grpc.py +++ b/opencda/scenario_testing/utils/proto/carla_pb2_grpc.py @@ -40,6 +40,16 @@ def __init__(self, channel): request_serializer=carla__pb2.Number.SerializeToString, response_deserializer=carla__pb2.Vehicle.FromString, ) + self.GetAllActorsIds = channel.unary_unary( + '/carla.CarlaAdapter/GetAllActorsIds', + request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + response_deserializer=carla__pb2.ActorIds.FromString, + ) + self.GetActorById = channel.unary_unary( + '/carla.CarlaAdapter/GetActorById', + request_serializer=carla__pb2.Number.SerializeToString, + response_deserializer=carla__pb2.Actor.FromString, + ) self.InsertVehicle = channel.unary_unary( '/carla.CarlaAdapter/InsertVehicle', request_serializer=carla__pb2.Vehicle.SerializeToString, @@ -140,6 +150,18 @@ def GetManagedActorById(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def GetAllActorsIds(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetActorById(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def InsertVehicle(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) @@ -246,6 +268,16 @@ def add_CarlaAdapterServicer_to_server(servicer, server): request_deserializer=carla__pb2.Number.FromString, response_serializer=carla__pb2.Vehicle.SerializeToString, ), + 'GetAllActorsIds': grpc.unary_unary_rpc_method_handler( + servicer.GetAllActorsIds, + request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + response_serializer=carla__pb2.ActorIds.SerializeToString, + ), + 'GetActorById': grpc.unary_unary_rpc_method_handler( + servicer.GetActorById, + request_deserializer=carla__pb2.Number.FromString, + response_serializer=carla__pb2.Actor.SerializeToString, + ), 'InsertVehicle': grpc.unary_unary_rpc_method_handler( servicer.InsertVehicle, request_deserializer=carla__pb2.Vehicle.FromString, @@ -406,6 +438,40 @@ def GetManagedActorById(request, options, 
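Editor's note: carla_pb2.py and carla_pb2_grpc.py are protoc-generated, so the serialized descriptor and offset table edited above stay consistent only if both files are regenerated from carla.proto once the new Actor message, the itsType/VAM fields and the GetAllActorsIds / GetActorById RPCs are added there. A hedged regeneration sketch (the proto path is assumed from the repository layout, not confirmed by the patch):

from grpc_tools import protoc            # pip install grpcio-tools
import pkg_resources

proto_dir = 'opencda/scenario_testing/utils/proto'   # assumed location of carla.proto
well_known = pkg_resources.resource_filename('grpc_tools', '_proto')  # google/protobuf/*.proto

protoc.main([
    'protoc',
    f'-I{proto_dir}',
    f'-I{well_known}',
    f'--python_out={proto_dir}',
    f'--grpc_python_out={proto_dir}',
    f'{proto_dir}/carla.proto',
])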
channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod + def GetAllActorsIds(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/carla.CarlaAdapter/GetAllActorsIds', + google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + carla__pb2.ActorIds.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetActorById(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/carla.CarlaAdapter/GetActorById', + carla__pb2.Number.SerializeToString, + carla__pb2.Actor.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod def InsertVehicle(request, target, diff --git a/opencda/scenario_testing/utils/sim_api.py b/opencda/scenario_testing/utils/sim_api.py index 8e996686a..444a684ee 100644 --- a/opencda/scenario_testing/utils/sim_api.py +++ b/opencda/scenario_testing/utils/sim_api.py @@ -14,6 +14,7 @@ from random import shuffle from omegaconf import OmegaConf from omegaconf.listconfig import ListConfig +import logging import carla import numpy as np @@ -48,6 +49,7 @@ def car_blueprint_filter(blueprint_library, carla_version='0.9.11'): The list of suitable blueprints for vehicles. """ + if carla_version == '0.9.11': print('old version') blueprints = [ @@ -163,7 +165,7 @@ class ScenarioManager: CAV World that contains the information of all CAVs. carla_map : carla.map - Car;a HD Map. + Carla HD Map. """ @@ -273,7 +275,7 @@ def create_vehicle_manager(self, application, map_helper=None, data_dump=False, pldm=False, - log_dir=None): + log_dir=None,port=8000): """ Create a list of single CAVs. @@ -296,6 +298,19 @@ def create_vehicle_manager(self, application, """ print('Creating single CAVs.') # By default, we use lincoln as our cav model. 
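Editor's note: with these sim_api.py changes the CAVs created in create_vehicle_manager are no longer driven through run_step() but handed to CARLA's traffic manager, so the TM has to be configured before the vehicles are spawned. A minimal sketch of that hand-over, with illustrative defaults in place of the yaml-driven values (the helper name is hypothetical):

def attach_to_traffic_manager(client, vehicle, tm_port=8000):
    """Hand a spawned CAV over to CARLA's traffic manager (illustrative defaults)."""
    tm = client.get_trafficmanager(tm_port)
    tm.set_synchronous_mode(True)                   # keep TM in lockstep with world.tick()
    tm.set_global_distance_to_leading_vehicle(2.0)  # metres to the leading vehicle
    tm.global_percentage_speed_difference(0)        # 0 = drive at the speed limit
    vehicle.set_autopilot(True, tm.get_port())      # TM now controls the vehicle
    tm.auto_lane_change(vehicle, False)
    tm.ignore_lights_percentage(vehicle, 0)         # never run red lights
    return tm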
+ traffic_config = self.scenario_params['carla_traffic_manager'] + if port != 8000: + tm = self.client.get_trafficmanager(port) + else: + tm = self.client.get_trafficmanager() + + tm.set_global_distance_to_leading_vehicle( + traffic_config['global_distance']) + tm.set_synchronous_mode(traffic_config['sync_mode']) + tm.set_osm_mode(traffic_config['set_osm_mode']) + tm.global_percentage_speed_difference( + traffic_config['global_speed_perc']) + default_model = 'vehicle.lincoln.mkz2017' \ if self.carla_version == '0.9.11' else 'vehicle.lincoln.mkz_2017' @@ -346,27 +361,7 @@ def create_vehicle_manager(self, application, # self.world.debug.draw_string(ego_pos.location, str(self.vehicle.id), False, carla.Color(200, 200, 0)) - # if 'intruder' in cav_config['v2x']: - # if cav_config['v2x']['intruder']: - # cav_vehicle_bp.set_attribute('color', '255, 0, 0') - # vehicle = self.world.spawn_actor(cav_vehicle_bp, spawn_transform) - # - # vehicle_manager = ExtendedVehicleManager( - # vehicle, cav_config, 'intruder', - # self.carla_map, self.cav_world, - # current_time=self.scenario_params['current_time'], - # data_dumping=data_dump, - # pldm=False, log_dir=log_dir) - # else: - # cav_vehicle_bp.set_attribute('color', '0, 0, 255') - # vehicle = self.world.spawn_actor(cav_vehicle_bp, spawn_transform) - # # create vehicle manager for each cav - # vehicle_manager = ExtendedVehicleManager( - # vehicle, cav_config, application, - # self.carla_map, self.cav_world, - # current_time=self.scenario_params['current_time'], - # data_dumping=data_dump, - # pldm=pldm, log_dir=log_dir) + if 'ms-van3t' in cav_config['v2x']: cav_vehicle_bp.set_attribute('color', '0, 0, 255') if 'intruder' in cav_config['v2x']: @@ -394,6 +389,14 @@ def create_vehicle_manager(self, application, data_dumping=data_dump, pldm=pldm, log_dir=log_dir) + vehicle.set_autopilot(True, tm.get_port()) + + if 'vehicle_speed_perc' in cav_config: + tm.vehicle_percentage_speed_difference( + vehicle, cav_config['vehicle_speed_perc']) + tm.auto_lane_change(vehicle, traffic_config['auto_lane_change']) + tm.ignore_lights_percentage(vehicle, 0) + self.world.tick() vehicle_manager.v2x_manager.set_platoon(None) @@ -407,18 +410,53 @@ def create_vehicle_manager(self, application, destination, clean=True) + single_cav_list.append(vehicle_manager) return single_cav_list def create_vehicle_manager_auto(self, application, - map_helper=None, - data_dump=False, - pldm=False, - log_dir=None, - port=8000): + map_helper=None, + data_dump=False, + x=0, y=0, z=0, + number=1, + random_bp=False, + spawnPoint=None): + single_cav_list = [] + number = len(self.scenario_params['scenario']['single_cav_list']) + for i in range(number): + cav_config = self.scenario_params['scenario']['single_cav_list'][i] + platoon_base = OmegaConf.create({'platoon': self.scenario_params.get('platoon_base', {})}) + cav_config = OmegaConf.merge(self.scenario_params['vehicle_base'], + platoon_base, + cav_config) + spawn_transform = carla.Transform( + carla.Location( + x=cav_config['spawn_position'][0], + y=cav_config['spawn_position'][1], + z=cav_config['spawn_position'][2]), + carla.Rotation( + pitch=cav_config['spawn_position'][5], + yaw=cav_config['spawn_position'][4], + roll=cav_config['spawn_position'][3])) + + cav = self.create_single_vehicle_manager_auto(application, map_helper, data_dump, i, random_bp, number=i, + spawnPoint=spawn_transform) + for c in cav: + single_cav_list.append(c) + + return single_cav_list + + + def create_single_vehicle_manager_auto(self, application, + map_helper=None, + 
data_dump=False, + x=0, y=0, z=0, + number=1, + random_bp=False, + spawnPoint=None): """ - Create a list of single CAVs to be controlled by carla's traffic manager. + Create a list of single CAVs. Parameters ---------- @@ -432,155 +470,86 @@ def create_vehicle_manager_auto(self, application, data_dump : bool Whether to dump sensor data. - port : int - The port number of the traffic manager. - Returns ------- single_cav_list : list A list contains all single CAVs' vehicle manager. """ - print('Creating single CAVs.') - - traffic_config = self.scenario_params['carla_traffic_manager'] - if port != 8000: - tm = self.client.get_trafficmanager(port) - else: - tm = self.client.get_trafficmanager() - - tm.set_global_distance_to_leading_vehicle( - traffic_config['global_distance']) - tm.set_synchronous_mode(traffic_config['sync_mode']) - tm.set_osm_mode(traffic_config['set_osm_mode']) - tm.global_percentage_speed_difference( - traffic_config['global_speed_perc']) + print('Creating single CAV.') # By default, we use lincoln as our cav model. default_model = 'vehicle.lincoln.mkz2017' \ if self.carla_version == '0.9.11' else 'vehicle.lincoln.mkz_2017' - cav_vehicle_bp = \ - self.world.get_blueprint_library().find(default_model) + if random_bp: + cav_vehicle_bp = \ + random.choice(self.world.get_blueprint_library().filter('vehicle.*')) + else: + cav_vehicle_bp = \ + self.world.get_blueprint_library().find(default_model) single_cav_list = [] - for i, cav_config in enumerate( - self.scenario_params['scenario']['single_cav_list']): - # in case the cav wants to join a platoon later - # it will be empty dictionary for single cav application - platoon_base = OmegaConf.create({'platoon': self.scenario_params.get('platoon_base', {})}) - cav_config = OmegaConf.merge(self.scenario_params['vehicle_base'], - platoon_base, - cav_config) - # if the spawn position is a single scalar, we need to use map - # helper to transfer to spawn transform - if 'spawn_special' not in cav_config: - spawn_transform = carla.Transform( - carla.Location( - x=cav_config['spawn_position'][0], - y=cav_config['spawn_position'][1], - z=cav_config['spawn_position'][2]), - carla.Rotation( - pitch=cav_config['spawn_position'][5], - yaw=cav_config['spawn_position'][4], - roll=cav_config['spawn_position'][3])) - else: - # spawn_transform = map_helper(self.carla_version, - # cav_config['spawn_special'][0]) - transform_point = carla.Transform(carla.Location(x=-1202.0827, - y=458.2501, - z=0.3), - carla.Rotation(yaw=-20.4866)) + # in case the cav wants to join a platoon later + # it will be empty dictionary for single cav application + cav_config = self.scenario_params['scenario']['single_cav_list'][number] + platoon_base = OmegaConf.create({'platoon': self.scenario_params.get('platoon_base', {})}) + cav_config = OmegaConf.merge(self.scenario_params['vehicle_base'], + platoon_base, + cav_config) - begin_point = carla.Transform(carla.Location(x=-16.7102, - y=15.3622, - z=0.3), - carla.Rotation(yaw=-20.4866)) + self.world.get_actors() + actor_list = self.world.get_actors() + vehicles = actor_list.filter('vehicle.*') + spawnPoints = self.world.get_map().get_spawn_points() - transform_point.location.x = transform_point.location.x + cav_config['spawn_special'][0] * \ - (begin_point.location.x - - transform_point.location.x) - transform_point.location.y = transform_point.location.y + cav_config['spawn_special'][0] * \ - (begin_point.location.y - - transform_point.location.y) - spawn_transform = transform_point - # 
self.world.debug.draw_string(ego_pos.location, str(self.vehicle.id), False, carla.Color(200, 200, 0)) + # iterate over the spawn points and do self.world.debug.draw_string(spawnPoints[i].location, str(round(i)), False, carla.Color(200, 200, 0), 10) + for i in range(len(spawnPoints)): + x = spawnPoints[i].location.x + y = spawnPoints[i].location.y + text = f"{round(i)} - ({x},{y})" + self.world.debug.draw_string(spawnPoints[i].location, text, False, carla.Color(200, 200, 0), 10) - # if 'intruder' in cav_config['v2x']: - # if cav_config['v2x']['intruder']: - # cav_vehicle_bp.set_attribute('color', '255, 0, 0') - # vehicle = self.world.spawn_actor(cav_vehicle_bp, spawn_transform) - # - # vehicle_manager = ExtendedVehicleManager( - # vehicle, cav_config, 'intruder', - # self.carla_map, self.cav_world, - # current_time=self.scenario_params['current_time'], - # data_dumping=data_dump, - # pldm=False, log_dir=log_dir) - # else: - # cav_vehicle_bp.set_attribute('color', '0, 0, 255') - # vehicle = self.world.spawn_actor(cav_vehicle_bp, spawn_transform) - # # create vehicle manager for each cav - # vehicle_manager = ExtendedVehicleManager( - # vehicle, cav_config, application, - # self.carla_map, self.cav_world, - # current_time=self.scenario_params['current_time'], - # data_dumping=data_dump, - # pldm=pldm, log_dir=log_dir) - if 'ms-van3t' in cav_config['v2x']: - cav_vehicle_bp.set_attribute('color', '0, 0, 255') - if 'intruder' in cav_config['v2x']: - cav_vehicle_bp.set_attribute('color', '255, 0, 0') - # print ('transform:', spawn_transform) - vehicle = self.world.spawn_actor(cav_vehicle_bp, spawn_transform) - # create vehicle manager for each cav - vehicle_manager = ExtendedVehicleManager( - vehicle, cav_config, application, - self.carla_map, self.cav_world, - current_time=self.scenario_params['current_time'], - data_dumping=data_dump, - pldm=pldm, log_dir=log_dir, ms_vanet=True) + cav_vehicle_bp.set_attribute('color', '0, 255, 0') - vehicle.set_autopilot(True, tm.get_port()) - if 'vehicle_speed_perc' in cav_config: - tm.vehicle_percentage_speed_difference( - vehicle, cav_config['vehicle_speed_perc']) - tm.auto_lane_change(vehicle, traffic_config['auto_lane_change']) - tm.ignore_lights_percentage(vehicle, 0) - else: - if 'spawn_special' not in cav_config: - cav_vehicle_bp.set_attribute('color', '0, 0, 255') - else: - cav_vehicle_bp.set_attribute('color', '255, 0, 0') - vehicle = self.world.spawn_actor(cav_vehicle_bp, spawn_transform) - # create vehicle manager for each cav - vehicle_manager = ExtendedVehicleManager( - vehicle, cav_config, application, - self.carla_map, self.cav_world, - current_time=self.scenario_params['current_time'], - data_dumping=data_dump, - pldm=pldm, log_dir=log_dir) + #using spawn point from yaml file. 
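Editor's note: the commented-out block below, like the spawn code in create_vehicle_manager and create_vehicle_manager_auto, maps the six-element yaml spawn_position list onto a carla.Transform; the order is x, y, z, roll, yaw, pitch. A small sketch of that mapping (helper name and example values are illustrative):

import carla

def transform_from_spawn_position(spawn_position):
    """Build a carla.Transform from the yaml list [x, y, z, roll, yaw, pitch]."""
    x, y, z, roll, yaw, pitch = spawn_position
    return carla.Transform(carla.Location(x=x, y=y, z=z),
                           carla.Rotation(pitch=pitch, yaw=yaw, roll=roll))

# e.g. [-64.0, 24.0, 0.3, 0, 90, 0] -> 0.3 m above ground, facing +y
transform = transform_from_spawn_position([-64.0, 24.0, 0.3, 0, 90, 0])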
+ # sp= carla.Transform( + # carla.Location( + # x=cav_config['spawn_position'][0], + # y=cav_config['spawn_position'][1], + # z=cav_config['spawn_position'][2]), + # carla.Rotation( + # pitch=cav_config['spawn_position'][5], + # yaw=cav_config['spawn_position'][4], + # roll=cav_config['spawn_position'][3])) - vehicle.set_autopilot(True, tm.get_port()) - if 'vehicle_speed_perc' in cav_config: - tm.vehicle_percentage_speed_difference( - vehicle, cav_config['vehicle_speed_perc']) - tm.auto_lane_change(vehicle, traffic_config['auto_lane_change']) - tm.ignore_lights_percentage(vehicle, 0) + vehicle = self.world.spawn_actor(cav_vehicle_bp, spawnPoint) - self.world.tick() + # create vehicle manager for each cav + vehicle_manager = ExtendedVehicleManager( + vehicle, cav_config, application, + self.carla_map, self.cav_world, + current_time=self.scenario_params['current_time'], + data_dumping=data_dump) - vehicle_manager.v2x_manager.set_platoon(None) + self.world.tick() - destination = carla.Location(x=cav_config['destination'][0], - y=cav_config['destination'][1], - z=cav_config['destination'][2]) - vehicle_manager.update_info_LDM() - vehicle_manager.set_destination( - vehicle_manager.vehicle.get_location(), - destination, - clean=True) + vehicle_manager.v2x_manager.set_platoon(None) - single_cav_list.append(vehicle_manager) + destination = carla.Location(x=cav_config['destination'][0], + y=cav_config['destination'][1], + z=cav_config['destination'][2]) + vehicle_manager.update_info_LDM() + vehicle_manager.set_destination( + vehicle_manager.vehicle.get_location(), + destination, + clean=True) + vehicle_manager.vehicle.set_autopilot(True, 8000) + + tm = self.client.get_trafficmanager() + tm.auto_lane_change(vehicle, True) + tm.ignore_lights_percentage(vehicle_manager.vehicle, 1) + + single_cav_list.append(vehicle_manager) return single_cav_list @@ -784,6 +753,249 @@ def create_rsu_manager(self, data_dump): return rsu_list + + def spawn_pedestrian_by_list (self, tm, traffic_config, pedestrian_list): + """ + Spawn the pedestrians by the given list + + Parameters + ---------- + tm : carla.TrafficManager + Traffic manager. + + traffic_config : dict + Background traffic configuration. + + pedestrian_list : list + The list contains all pedestrians. + + Returns + ------- + pedestrians_list : list + Update pedestrians list. 
+ """ + + # list of spawn points for pedestrians near CAV1 + spawn_point_pedestrians = [] + i=0 + while i<400: + sp = self.world.get_random_location_from_navigation() + spawn_point_pedestrians.append(sp) + debug_text = f"ID:{round(i)} X:{sp.x:.1f} Y:{sp.y:.1f}" + self.world.debug.draw_string(sp, str(round(i)), False, carla.Color(255, 255, 255), + 100) + print(str(round(i)), " : x = ", sp.x, " , y = ", sp.y) + i=i+1 + spectator = self.world.get_spectator() + # assign the first CAV as the spectator vehicle + #spectator_vehicle = single_cav_list[0].vehicle + # get the transform of the spectator vehicle + transform = carla.Transform(carla.Location(x=-64,y=24)) + # set the spectator to the top of the spectator vehicle + spectator.set_transform(carla.Transform(transform.location + + carla.Location(z=110), + carla.Rotation(pitch=-90))) + + blueprint_library = self.world.get_blueprint_library() + walker_controller_bp = blueprint_library.find('controller.ai.walker') + SpawnActor = carla.command.SpawnActor + batch = [] + all_id = [] + all_actors = [] + + #Create batch with locations to spawn pedestrians + for i, pedestrian_config in enumerate(traffic_config['pedestrian_list']): + spawn_transform = carla.Transform( + carla.Location( + x=pedestrian_config['spawn_position'][0], + y=pedestrian_config['spawn_position'][1], + z=pedestrian_config['spawn_position'][2])) + + walker_bp = random.choice(blueprint_library.filter('walker.pedestrian.*')) + batch.append(SpawnActor(walker_bp, spawn_transform)) + + #print spawning points on the map + self.world.debug.draw_string(spawn_transform.location, str(round(i)), False, carla.Color(200, 0, 200), 100) + + #Spawn pedestrians + results = self.client.apply_batch_sync(batch, True) + for i in range(len(results)): + if results[i].error: + logging.error(results[i].error) + else: + pedestrian_list.append({"id": results[i].actor_id}) + + batch = [] + for i in range(len(pedestrian_list)): + batch.append(SpawnActor(walker_controller_bp, carla.Transform(), pedestrian_list[i]['id'])) + + #Spawn controllers + results = self.client.apply_batch_sync(batch, True) + for i in range(len(results)): + if results[i].error: + logging.error(results[i].error) + else: + pedestrian_list[i]['con'] = results[i].actor_id + + for i in range(len(pedestrian_list)): #pedestrian_list: list of dict {'con': id, 'walker': id} + all_id.append(pedestrian_list[i]["con"]) #all_id: list of ids [con, walker, con ,walker...] + all_id.append(pedestrian_list[i]["id"]) + all_actors = self.world.get_actors(all_id) #all_actors: list of actors [actor con, actor walker, actor con....] + + + for i in range(0, len(all_id), 2): + # start walker + all_actors[i].start() + # set walk to random point + all_actors[i].go_to_location(self.world.get_random_location_from_navigation()) + + + print(len(pedestrian_list), "pedestrians were spawned") + + return all_id + def spawn_pedestrian_by_radius (self, tm, traffic_config): + """ + Spawn the pedestrians by the given list + + Parameters + ---------- + tm : carla.TrafficManager + Traffic manager. + + traffic_config : dict + Background traffic configuration. + + second_pedestrian_list : list + The list contains all pedestrians. + + Returns + ------- + pedestrians_list : list + Update pedestrians list. 
+ """ + + # list of spawn points for pedestrians near CAV1 + # spawn_point_pedestrians = [] + # i=0 + # while i<150: + # sp = self.world.get_random_location_from_navigation() + # spawn_point_pedestrians.append(sp) + # self.world.debug.draw_string(sp, str(round(i)), False, carla.Color(200, 0, 200), + # 100) + # i=i+1 + # spectator = self.world.get_spectator() + # # assign the first CAV as the spectator vehicle + # #spectator_vehicle = single_cav_list[0].vehicle + # # get the transform of the spectator vehicle + # transform = carla.Transform(carla.Location(x=-64,y=24)) + # # set the spectator to the top of the spectator vehicle + # spectator.set_transform(carla.Transform(transform.location + + # carla.Location(z=110), + # carla.Rotation(pitch=-90))) + + + blueprint_library = self.world.get_blueprint_library() + walker_controller_bp = blueprint_library.find('controller.ai.walker') + SpawnActor = carla.command.SpawnActor + batch = [] + all_id = [] + all_actors = [] + spawn_point_pedestrians = [] + i=0 + j=0 + second_pedestrian_list = [] + ped_not_spawned = 0 + n_pedestrian = traffic_config['n_pedestrian'] + radius = traffic_config['radius'] + same_dest = traffic_config['same_destination'] + + cav_config = self.scenario_params['scenario']['single_cav_list'][0] + if traffic_config['center'] == [0, 0]: #if we do not specify, radius is around cav1 + center_x = cav_config['spawn_position'][0] + center_y = cav_config['spawn_position'][1] + else: + center_x = traffic_config['center'][0] + center_y = traffic_config['center'][1] + + while i < n_pedestrian: + sp = self.world.get_random_location_from_navigation() + if (center_x-radius < sp.x < center_x+radius) & (center_y-radius < sp.y < center_y+radius): + # spawn_point_pedestrians.append(sp) + walker_bp = random.choice(blueprint_library.filter('walker.pedestrian.*')) + spawnpoint = carla.Transform(carla.Location(sp)) + batch.append(SpawnActor(walker_bp, spawnpoint)) + i= i+1 + j=j+1 + + if j > 4000: + print('Not enough spawn points for the corresponding radius and number of pedestrians chosen.') + break + + + #Spawn pedestrians + results = self.client.apply_batch_sync(batch, True) + for i in range(len(results)): + if results[i].error: + logging.error(results[i].error) + ped_not_spawned = ped_not_spawned + 1 + else: + second_pedestrian_list.append({"id": results[i].actor_id}) + + count = 0 + count2 = 0 + while count < ped_not_spawned: + sp = self.world.get_random_location_from_navigation() + if (center_x - radius < sp.x < center_x + radius) & (center_y - radius < sp.y < center_y + radius): + walker_bp = random.choice(blueprint_library.filter('walker.pedestrian.*')) + spawnpoint = carla.Transform(carla.Location(sp)) + ped = self.world.try_spawn_actor(walker_bp, spawnpoint) + if ped is not None: + second_pedestrian_list.append({"id": ped.id}) + count = count + 1 + print("New location found") + count2 = count2 + 1 + if count2 > 4000: + print('Not enough spawn points for the corresponding radius and number of pedestrians chosen.') + break + + + + batch = [] + for i in range(len(second_pedestrian_list)): + batch.append(SpawnActor(walker_controller_bp, carla.Transform(), second_pedestrian_list[i]['id'])) + + #Spawn controllers + results = self.client.apply_batch_sync(batch, True) + for i in range(len(results)): + if results[i].error: + logging.error(results[i].error) + else: + second_pedestrian_list[i]['con'] = results[i].actor_id + + for i in range(len(second_pedestrian_list)): #pedestrian_list: list of dict {'con': id, 'walker': id} + 
all_id.append(second_pedestrian_list[i]["con"]) #all_id: list of ids [con, walker, con ,walker...] + all_id.append(second_pedestrian_list[i]["id"]) + all_actors = self.world.get_actors(all_id) #all_actors: list of actors [actor con, actor walker, actor con....] + + if same_dest: + go_to = self.world.get_random_location_from_navigation() + for i in range(0, len(all_id), 2): + # start walker + all_actors[i].start() + # set walk to random point + all_actors[i].go_to_location(go_to) + else: + for i in range(0, len(all_id), 2): + # start walker + all_actors[i].start() + # set walk to random point + all_actors[i].go_to_location(self.world.get_random_location_from_navigation()) + + print(len(second_pedestrian_list), "pedestrians were spawned by range, center = [ ", center_x, ", ", center_y, + " ], radius = ", radius) + + return all_id + def spawn_vehicles_by_list(self, tm, traffic_config, bg_list): """ Spawn the traffic vehicles by the given list. @@ -848,6 +1060,15 @@ def spawn_vehicles_by_list(self, tm, traffic_config, bg_list): 'color').recommended_values) ego_vehicle_bp.set_attribute('color', color) + if vehicle_config['blueprint'] == 'bicycle': + ego_vehicle_bp = blueprint_library.find('vehicle.bh.crossbike') + elif vehicle_config['blueprint'] == 'motorcycle': + ego_vehicle_bp = blueprint_library.find('vehicle.harley-davidson.low_rider') + else: pass + + # spawnPoints = self.world.get_map().get_spawn_points() + + vehicle = self.world.spawn_actor(ego_vehicle_bp, spawn_transform) vehicle.set_autopilot(True, tm.get_port()) @@ -1013,8 +1234,28 @@ def create_traffic_carla(self, port=8000): else: bg_list = self.spawn_vehicle_by_range(tm, traffic_config, bg_list) + pedestrian_list = [] + second_pedestrian_list = [] + + if 'pedestrian_list' in traffic_config: + if isinstance(traffic_config['pedestrian_list'], list) or \ + isinstance(traffic_config['pedestrian_list'], ListConfig): + pedestrian_list = self.spawn_pedestrian_by_list(tm, + traffic_config, + pedestrian_list) + + #type(traffic_config['pedestrian_by_radius']) + if 'pedestrian_by_radius' in traffic_config: + if traffic_config['pedestrian_by_radius'] == True: + for i, batch_config in enumerate(traffic_config['spawn_list_by_radius']): + second_pedestrian_list += self.spawn_pedestrian_by_radius(tm, + batch_config) + + all_id = pedestrian_list + second_pedestrian_list + pedestrian_list = self.world.get_actors(all_id) # list of actors [actor con, actor walker, actor con....] + print('CARLA traffic flow generated.') - return tm, bg_list + return tm, bg_list, pedestrian_list def create_traffic_carla_by_number(self, vehicle_number): traffic_config = self.scenario_params['carla_traffic_manager'] @@ -1054,7 +1295,7 @@ def create_traffic_carla_by_spawn_point(self, spawnPoints): # iterate over the spawn points and do self.world.debug.draw_string(spawnPoints[i].location, str(round(0)), False, carla.Color(200, 200, 0), 10) sps = self.world.get_map().get_spawn_points() for i in range(len(sps)): - self.worlwd.debug.dwraw_string(sps[i].location, str(round(i)), False, carla.Color(200, 200, 0), 100) + self.world.debug.draw_string(sps[i].location, str(round(i)), False, carla.Color(200, 200, 0), 100) spawn_points = self.world.get_map().get_spawn_points() for sp in spawnPoints:
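Editor's note: both pedestrian spawners above follow the same two-step CARLA pattern: spawn walker.pedestrian.* actors (in batch), attach one controller.ai.walker per walker, then start each controller and send it to a navigation target; at teardown the controllers (the even-indexed entries of all_id / all_actors) are stopped before the walkers are destroyed. A condensed, hedged sketch of the pattern for a single walker (helper name illustrative):

import random
import carla

def spawn_walker_with_controller(world):
    """Spawn one pedestrian on the navmesh and attach an AI walker controller."""
    bp_lib = world.get_blueprint_library()
    walker_bp = random.choice(bp_lib.filter('walker.pedestrian.*'))
    controller_bp = bp_lib.find('controller.ai.walker')

    location = world.get_random_location_from_navigation()
    walker = world.try_spawn_actor(walker_bp, carla.Transform(location))
    if walker is None:                     # spawn point occupied or off the navmesh
        return None, None

    controller = world.spawn_actor(controller_bp, carla.Transform(), attach_to=walker)
    controller.start()
    controller.go_to_location(world.get_random_location_from_navigation())
    return walker, controller

# Teardown mirrors the scenario scripts: controller.stop() first, then
# destroy the walker and controller actors.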