diff --git a/MANIFEST.in b/MANIFEST.in index c779152..fe3e115 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,3 +2,4 @@ include omega_format/visualization/ui/main.ui include omega_format/visualization/ui/icon.svg include versioneer.py include omega_format/_version.py +include omega_format/perception/config.json diff --git a/__init__.py b/__init__.py deleted file mode 100644 index 026b7d1..0000000 --- a/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .omega_format import * -# DO NOT REMOVE. NEEDED TO BE USED AS GIT SUBMODULE diff --git a/cpp/include/MetaData.h b/cpp/include/MetaData.h index 75875b9..82aabc6 100644 --- a/cpp/include/MetaData.h +++ b/cpp/include/MetaData.h @@ -28,6 +28,9 @@ namespace omega { std::string weatherConverterVersion; std::string stateConverterVersion; std::string miscObjectConverterVersion; + std::string customInformation; + + int referenceModality; public: // default constructor creates "empty" object diff --git a/cpp/include/vvm_definitions.h b/cpp/include/vvm_definitions.h index 4bbbdab..3c84bd1 100644 --- a/cpp/include/vvm_definitions.h +++ b/cpp/include/vvm_definitions.h @@ -20,7 +20,7 @@ #include "perception_types.h" #else -#error Generated headers are missing. Generate them be executing the generate_enums.py in the vvm_recording_format/vvm_rec/enums directory. +#error Generated headers are missing. Generate them be executing the generate_enums.py in the omega_format/enums directory. #endif #endif //VVM_DEFINITIONS_H diff --git a/cpp/src/MetaData.cpp b/cpp/src/MetaData.cpp index a2c9af8..1236f80 100644 --- a/cpp/src/MetaData.cpp +++ b/cpp/src/MetaData.cpp @@ -12,6 +12,8 @@ namespace omega { refPointLat = 0.0; recorderNumber = VVMRecorderNumber::IKA; recordingNumber = 0; + referenceModality = 3; + customInformation = ""; naturalBehavior = true; naturalExposure = true; daytime = ""; @@ -30,6 +32,9 @@ namespace omega { omega::add_attribute_to_group(parent_group, "refPointLat", this->refPointLat); omega::add_attribute_to_group(parent_group, "refPointLong", this->refPointLong); + omega::add_attribute_to_group(parent_group, "referenceModality", this->referenceModality); + omega::add_attribute_to_group(parent_group, "customInformation", this->customInformation); + omega::add_attribute_to_group(parent_group, "daytime", this->daytime); omega::add_attribute_to_group(parent_group, "converterVersion", this->topLevelConverterVersion); @@ -71,6 +76,9 @@ namespace omega { read_attribute(parent_group, "refPointLat", meta_data.refPointLat); read_attribute(parent_group, "refPointLong", meta_data.refPointLong); + read_attribute(parent_group, "referenceModality", meta_data.referenceModality); + read_attribute(parent_group, "customInformation", meta_data.customInformation); + read_attribute(parent_group, "daytime", meta_data.daytime); omega::read_attribute(parent_group, "converterVersion", meta_data.topLevelConverterVersion); diff --git a/doc/images/2-8_global_coords.jpg b/doc/images/2-8_global_coords.jpg new file mode 100644 index 0000000..92c96c0 Binary files /dev/null and b/doc/images/2-8_global_coords.jpg differ diff --git a/doc/perception/.gitkeep b/doc/perception/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/doc/perception/images/.gitkeep b/doc/perception/images/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/doc/perception/images/IMG_1.jpg b/doc/perception/images/IMG_1.jpg new file mode 100644 index 0000000..ffabfb5 Binary files /dev/null and b/doc/perception/images/IMG_1.jpg differ diff --git a/doc/perception/images/image_0.png 
b/doc/perception/images/image_0.png new file mode 100644 index 0000000..1567ca3 Binary files /dev/null and b/doc/perception/images/image_0.png differ diff --git a/doc/perception/images/image_1.png b/doc/perception/images/image_1.png new file mode 100644 index 0000000..3911bb2 Binary files /dev/null and b/doc/perception/images/image_1.png differ diff --git a/doc/perception/images/image_1_fin.jpg b/doc/perception/images/image_1_fin.jpg new file mode 100644 index 0000000..31bbe68 Binary files /dev/null and b/doc/perception/images/image_1_fin.jpg differ diff --git a/doc/perception/images/image_2.png b/doc/perception/images/image_2.png new file mode 100644 index 0000000..e4c5945 Binary files /dev/null and b/doc/perception/images/image_2.png differ diff --git a/doc/perception/signal_list.md b/doc/perception/signal_list.md new file mode 100644 index 0000000..15cf1ce --- /dev/null +++ b/doc/perception/signal_list.md @@ -0,0 +1,157 @@ +# PerceptionDB signal list version 3.0 + +| Name |Type (Group) |Attribute |Group |Subgroup |Signal Class |Description |Data type |unit |Data rate |Comment +|:-----------------------|:---------------------|:-----------------|:--------------|:-----------|:-----------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------|:--------|:-------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|formatVersion|Top level|yes|||Enriched|Version number of input format used to generate this file|string|-|-|version x.y +|recorderNumber|Top level|yes|||Enriched|Number assigned to the partner performing the recording. |int|-|-|Same name can be found in scenario input format. +|recordingNumber|Top level|yes|||Enriched|Number of recording. Together with recorder number unique filename to connect ground truth recordings with the corresponding sensor under test recordings|int|-|-|Same name can be found in scenario input format. ||||||||||| +|converterVersion|Top level|yes|||Enriched|Version number of converter used to convert the data|string|-|-|Version x.y. 
Short documentation on converter also needs to be provided||||||||||| +|egoID|Top level|yes|||Enriched|ID of ego vehicle equiped with sensors matching the id in the DGT|int|id|-|||||||||||| +|egoOffset|Top level|yes|||Enriched|Offset/delta of ego vehicle between center point and rear axle (in x-direction of vehicle coordinate system)|double|m|-|Needed to transform from global coordinate system into vehicle coordinate system||||||||||| +|customInformation|Top level|yes|||Enriched|"provide any custom information that you want the data analyzer to have| but does not fit in any of the fields below"|string|-|-|||||||||||| + +| Name |Type (Group) |Attribute |Group |Subgroup |Signal Class |Description |Data type |unit |Data rate |Comment +|:-----------------------|:---------------------|:-----------------|:--------------|:-----------|:-----------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------|:--------|:-------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|timestamps|Top level|no|||Measured|Timestamps in seconds since beginnning of recording|double|s|-|All values that change over time have the same vector length as timestamps --> each entry in timestamps corresponds to one entry in the other vectors||||||||||| + +| Name |Type (Group) |Attribute |Group |Subgroup |Signal Class |Description |Data type |unit |Data rate |Comment +|:-----------------------|:---------------------|:-----------------|:--------------|:-----------|:-----------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------|:--------|:-------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|distLongitudinalValType|object|yes|||Enriched|"Provide if signal is measured by the sensor| derived manually through other information or not provided at all"|int|-||||||||||||| +|distLongitudinalVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|distLateralValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|distLateralVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|distZValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|distZVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|relVelLongitudinalValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| 
+|relVelLongitudinalVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|relVelLateralValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|relVelLateralVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|absVelLongitudinalValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|absVelLongitudinalVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|absVelLateralValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|absVelLateralVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|relAccLongitudinalValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|relAccLongitudinalVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|relAccLateralValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|relAccLateralVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|absAccLongitudinalValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|absAccLongitudinalVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|absAccLateralValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|absAccLateralVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|objectClassificationValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|objectClassificationConfidenceValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|headingValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|headingVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|widthValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|widthVarType|object|yes|||Enriched|"of what type is the signal (not 
provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|heightValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|heightVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|lengthValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|lengthVarType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|rcsValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|ageValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|trackingPointValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|confidenceOfExistenceValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|movementClassificationValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|measStateValType|object|yes|||Enriched|"of what type is the signal (not provided, measured or determined)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| + +| Name |Type (Group) |Attribute |Group |Subgroup |Signal Class |Description |Data type |unit |Data rate |Comment +|:-----------------------|:---------------------|:-----------------|:--------------|:-----------|:-----------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------|:--------|:-------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|_id_|_object_||||_Measured_|_Id of detected object_|_int_|_id_|-|||||||||||| +|birthStamp|object/{id}|yes|||Measured|Index in timestamps vector at birth of traffic participant (when it was first recorded by perception)|int|-|-|||||||||||| +|val|object/{id}|no||distLongitudinal|Measured|Distance to center of object in ego coordinate system in longitudinal direction|double|m|25 Hz / 10 Hz|How to define ego coordinate system will be provided in specification||||||||||| +|var|object/{id}|no||distLongitudinal|Measured / Enriched|Variance of longitudinal distance|double|m^2|25 Hz / 10 Hz|All variances can either be measured by the used sensor or they can be used to note down that this value is not provided by the sensor. This is noted down by -1. 
If only the variance is not provided this is noted down by an entry of 10^5||||||||||| +|val|object/{id}|no||distLateral|Measured|Distance to center of object in ego coordinate system in lateral direction|double|m|25 Hz / 10 Hz|How to define ego coordinate system will be provided in specification||||||||||| +|var|object/{id}|no||distLateral|Measured / Enriched|Variance of lateral distance|double|m^2|25 Hz / 10 Hz|All variances can either be measured by the used sensor or they can be used to note down that this value is not provided by the sensor. This is noted down by -1. If only the variance is not provided this is noted down by an entry of 10^5||||||||||| +|val|object/{id}|no||distZ|Measured|Distance to center of object in ego coordinate system in z direction|double|m|25 Hz / 10 Hz|How to define ego coordinate system will be provided in specification||||||||||| +|var|object/{id}|no||distZ|Measured / Enriched|Variance of distance in z direction|double|m^2|25 Hz / 10 Hz|All variances can either be measured by the used sensor or they can be used to note down that this value is not provided by the sensor. This is noted down by -1. If only the variance is not provided this is noted down by an entry of 10^5||||||||||| +|val|object/{id}|no||relVelLongitudinal|Measured|Velocity relativ to ego in longitudinal direction|double|m/s|25 Hz / 10 Hz|||||||||||| +|var|object/{id}|no||relVelLongitudinal|Measured / Enriched|variance of relativ velocity in longitudinal direction|double|m^2/s^2|25 Hz / 10 Hz|All variances can either be measured by the used sensor or they can be used to note down that this value is not provided by the sensor. This is noted down by -1. If only the variance is not provided this is noted down by an entry of 10^5||||||||||| +|val|object/{id}|no||relVelLateral|Measured|Velocity relativ to ego in lateral direction|double|m/s|25 Hz / 10 Hz|||||||||||| +|var|object/{id}|no||relVelLateral|Measured / Enriched|Variance of relativ velocity in lateral direction|double|m^2/s^2|25 Hz / 10 Hz|All variances can either be measured by the used sensor or they can be used to note down that this value is not provided by the sensor. This is noted down by -1. If only the variance is not provided this is noted down by an entry of 10^5||||||||||| +|val|object/{id}|no||absVelLongitudinal|Measured|Absolute velocity of object in longitudinal direction|double|m/s|25 Hz / 10 Hz|||||||||||| +|var|object/{id}|no||absVelLongitudinal|Measured / Enriched|variance of absolute velocity of object in longitudinal direction|double|m^2/s^2|25 Hz / 10 Hz|All variances can either be measured by the used sensor or they can be used to note down that this value is not provided by the sensor. This is noted down by -1. If only the variance is not provided this is noted down by an entry of 10^5||||||||||| +|val|object/{id}|no||absVelLateral|Measured|Absolute velocity of object in lateral direction|double|m/s|25 Hz / 10 Hz|||||||||||| +|var|object/{id}|no||absVelLateral|Measured / Enriched|variance of absolute velocity of object in lateral direction|double|m^2/s^2|25 Hz / 10 Hz|All variances can either be measured by the used sensor or they can be used to note down that this value is not provided by the sensor. This is noted down by -1. 
If only the variance is not provided this is noted down by an entry of 10^5||||||||||| +|val|object/{id}|no||relAccLongitudinal|Measured|Relative acceleration in longitudinal direction|double|m/s^2|25 Hz / 10 Hz|||||||||||| +|var|object/{id}|no||relAccLongitudinal|Measured / Enriched|Variance of relative acceleration in longitudinal direction|double|m^2/s^4|25 Hz / 10 Hz|All variances can either be measured by the used sensor or they can be used to note down that this value is not provided by the sensor. This is noted down by -1. If only the variance is not provided this is noted down by an entry of 10^5||||||||||| +|val|object/{id}|no||relAccLateral|Measured|Relative acceleration in lateral direction|double|m/s^2|25 Hz / 10 Hz|||||||||||| +|var|object/{id}|no||relAccLateral|Measured / Enriched|Variance of relative acceleration in lateral direction|double|m^2/s^4|25 Hz / 10 Hz|All variances can either be measured by the used sensor or they can be used to note down that this value is not provided by the sensor. This is noted down by -1. If only the variance is not provided this is noted down by an entry of 10^5||||||||||| +|val|object/{id}|no||absAccLongitudinal|Measured|Absolute acceleration in longitudinal direction|double|m/s^2|25 Hz / 10 Hz|||||||||||| +|var|object/{id}|no||absAccLongitudinal|Measured / Enriched|Variance of absolute acceleration in longitudinal direction|double|m^2/s^4|25 Hz / 10 Hz|All variances can either be measured by the used sensor or they can be used to note down that this value is not provided by the sensor. This is noted down by -1. If only the variance is not provided this is noted down by an entry of 10^5||||||||||| +|val|object/{id}|no||absAccLateral|Measured|Absolute acceleration in lateral direction|double|m/s^2|25 Hz / 10 Hz|||||||||||| +|var|object/{id}|no||absAccLateral|Measured / Enriched|Variance of absolute acceleration in lateral direction|double|m^2/s^4|25 Hz / 10 Hz|All variances can either be measured by the used sensor or they can be used to note down that this value is not provided by the sensor. This is noted down by -1. If only the variance is not provided this is noted down by an entry of 10^5||||||||||| +|val|object/{id}|no||objectClassification|Measured|Object classification|int|-|25 Hz / 10 Hz|Lookup table will be provided||||||||||| +|confidence|object/{id}|no||objectClassification|Measured / Enriched|Classification confidence. 
The higher the number the more reliable is the assignment|double|-|25 Hz / 10 Hz|If not provided use -1||||||||||| +|val|object/{id}|no||heading|Measured|heading angle of target object|double|rad|25Hz / 10Hz|||||||||||| +|var|object/{id}|no||heading|Measured / Enriched|variance of heading angle of target object|double|rad^2|25Hz / 10Hz|||||||||||| +|val|object/{id}|no||width|Measured|width of target|double|m|25Hz / 10Hz|||||||||||| +|var|object/{id}|no||width|Measured / Enriched|variance of width of target|double|m^2|25Hz / 10Hz|||||||||||| +|val|object/{id}|no||height|Measured|height of target|double|m|25Hz / 10Hz|||||||||||| +|var|object/{id}|no||height|Measured / Enriched|variance of height of target|double|m^2|25Hz / 10Hz|||||||||||| +|val|object/{id}|no||length|Measured|length of target|double|m|25Hz / 10Hz|||||||||||| +|var|object/{id}|no||length|Measured / Enriched|variance of length of target|double|m^2|25Hz / 10Hz|||||||||||| +|rcs|object/{id}|no|||Measured|RCS of target|double|dBm^2|25Hz / 10Hz|||||||||||| +|age|object/{id}|no|||Measured|Age of target (how long has it been tracked)|double|s|25Hz / 10Hz|||||||||||| +|trackingPoint|object/{id}|no|||Measured|which tracking point/feature (3x3 grid on object) was detected|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|confidenceOfExistence|object/{id}|no|||Measured|Existance probability|double|%|25Hz / 10Hz|||||||||||| +|movementClassification|object/{id}|no|||Measured / Enriched|"Classification of movement (e.g. oncoming, crossing, stationary etc.)"|int|-|25Hz / 10Hz|Lookup table will be provided||||||||||| +|measState|object/{id}|no|||Measured / Enriched|Measurement State|int |-|25Hz / 10Hz|Lookup table will be provided||||||||||| + +| Name |Type (Group) |Attribute |Group |Subgroup |Signal Class |Description |Data type |unit |Data rate |Comment +|:-----------------------|:---------------------|:-----------------|:--------------|:-----------|:-----------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------|:--------|:-------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|Metainformation on sensor||||||||||Sensorspecification should also be provided||||||||||| +|sensorModality|sensor/{id}|yes|||Enriched|"Radar Camera Lidar Fusion etc."|int|-|-|Lookup Table will be provided||||||||||| +|fusionInformation|sensor/{id}|yes|||Enriched|In case of fusion you can provide information on the used sensor modalities here as a list|string|-|-|||||||||||| +|sensorName|sensor/{id}|yes|||Enriched|Exact name of sensor|string|-|-|||||||||||| +|_sensorID_|_sensor_||||_Enriched_|_Id of sensor in order to couple metainformation and object data_|_int_|-|-|||||||||||| +|firmwareVersion|sensor/{id}|yes|||Enriched|Number of firmware flashed on sensor used or version number of fusion algorithm if fusion|string|-|-|||||||||||| +|originalUpdaterate|sensor/{id}|yes|||Enriched|Provide the original framerate from which the data has probably be down or upsampled|double|Hz|-|||||||||||| +|sensorPosLongitudinal|sensor/{id}|yes|||Measured|Mounting position of sensor relativ to ego coordinate system origin|double|m|-|||||||||||| 
+|sensorPosLateral|sensor/{id}|yes|||Measured|Mounting position of sensor relativ to ego coordinate system origin|double|m|-|||||||||||| +|sensorPosZ|sensor/{id}|yes|||Measured|Mounting position of sensor relativ to ego coordinate system origin|double|m|-|||||||||||| +|sensorHeading|sensor/{id}|yes|||Measured|Orientation of the sensor in the ego coordinate system|double|degree|-|||||||||||| +|sensorPitch|sensor/{id}|yes|||Measured|Orientation of the sensor in the ego coordinate system|double|degree|-|||||||||||| +|sensorRoll|sensor/{id}|yes|||Measured|Orientation of the sensor in the ego coordinate system|double|degree|-|||||||||||| +|maxRange|sensor/{id}|yes|||Enriched|Maximum range of sensor in short range|double|m|-|||||||||||| +|minRange|sensor/{id}|yes|||Enriched|minimum range of sensor in short range|double|m|-|||||||||||| +|foVVertical|sensor/{id}|yes|||Enriched|Vertical field of view|double|degree|-|||||||||||| +|foVHorizontal|sensor/{id}|yes|||Enriched|Horizontal field of view|double|degree|-|Complete angle. Angle is assumed to extent equally to the left and the right hand side||||||||||| +|minVelocity|sensor/{id}|yes|||Enriched|Minimum velocity sensor can measure|double|m/s|-|||||||||||| +|maxVelocity|sensor/{id}|yes|||Enriched|Maximum velocity sensor can measure|doubble|m/s|-|||||||||||| +|angleResolutionVertical|sensor/{id}|yes|||Enriched|Angle resolution in vertical direction|double|degree|-|||||||||||| +|angleResolutionHorizontal|sensor/{id}|yes|||Enriched|Angle resolution in horizontal direction|double|degree|-|||||||||||| +|rangeResolution|sensor/{id}|yes|||Enriched|horizontal distance resolution|double|m|-|if only one value is given for distance resolution use range entry||||||||||| +|verticalResolution|sensor/{id}|yes|||Enriched|vertical distance resolution|double|m|-|if only one value is given for distance resolution use range entry||||||||||| +|velocityResolution|sensor/{id}|yes|||Enriched|velocity resolution|double|m/s|-|||||||||||| +|angleAccuracy|sensor/{id}|yes|||Enriched|angel accuracy|double|degree|-|||||||||||| +|verticalAccuracy|sensor/{id}|yes|||Enriched|vertical distance accuracy|double|m|-|if only one value is given for distance accuracy use range entry||||||||||| +|rangeAccuracy|sensor/{id}|yes|||Enriched|horizontal distance accuracy|double|m|-|if only one value is given for distance accuracy use range entry||||||||||| +|velocityAccuracy|sensor/{id}|yes|||Enriched|velocity distance accuracy|double|m/s|-|||||||||||| +|anglePrecision|sensor/{id}|yes|||Enriched|angle precision|double|degree|-|||||||||||| +|rangePrecision|sensor/{id}|yes|||Enriched|horizontal distance precision|double|m|-|if only one value is given for distance precission use range entry||||||||||| +|verticalPrecision|sensor/{id}|yes|||Enriched|vertical distance precision|double|m|-|if only one value is given for distance precission use range entry||||||||||| +|velocityPrecision|sensor/{id}|yes|||Enriched|velocity precision|double|m/s|-|||||||||||| +|trackConfirmationLatency|sensor/{id}|yes|||Enriched|Time until track of new object is outputed|double|ms|-|||||||||||| +|trackDropLatency|sensor/{id}|yes|||Enriched|Time until track of an object previously tracked is deleted|double|ms|-|||||||||||| +|maxObjectTracks|sensor/{id}|yes|||Enriched|maximum number of objects the sensor can track|int|-|-|||||||||||| + +| Name |Type (Group) |Attribute |Group |Subgroup |Signal Class |Description |Data type |unit |Data rate |Comment 
+|:-----------------------|:---------------------|:-----------------|:--------------|:-----------|:-----------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------|:--------|:-------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|Ego Position||||||can be filled with GNSS data or other self-determined positioning data||||||||||||||| +|val|egoPosition|no|posLongitude||Measured||double|°E|25Hz/10Hz|||||||||||| +|var|egoPosition|no|posLongitude||Measured||double||25Hz/10Hz|||||||||||| +|val|egoPosition|no|posLatitude||Measured||double|°N|25Hz/10Hz|||||||||||| +|var|egoPosition|no|posLatitude||Measured||double||25Hz/10Hz|||||||||||| +|val|egoPosition|no|posZ||Measured||double|m|25Hz/10Hz|||||||||||| +|var|egoPosition|no|posZ||Measured|||||||||||||||| +|val|egoPosition|no|heading||Measured||double|deg|25Hz/10Hz|||||||||||| +|var|egoPosition|no|heading||Measured||double|deg^2|25Hz/10Hz|||||||||||| +|yawRate|egoPosition|no|||Measured||double|deg/s|25Hz/10Hz|||||||||||| +|pitch|egoPosition|no|||Measured||double|deg|25Hz/10Hz|||||||||||| + +| Name |Type (Group) |Attribute |Group |Subgroup |Signal Class |Description |Data type |unit |Data rate |Comment +|:-----------------------|:---------------------|:-----------------|:--------------|:-----------|:-----------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------|:--------|:-------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|Additional Sensors (if vehicle equipped with such)||||||Additional sensors (if vehicle equipped with such)||||AVL considers equiping the vehicle with additional sensors for the sensor under test setup||||||||||| +|lightIntensity|MiscInfo||||Measured|light distance intensity for short range|double|lux|25Hz/10Hz|information will follow once sensor has been specified and output of it is clear||||||||||| +|acoustics|MiscInfo||||Measured|acoustics distance s for short range|double|dB|25Hz/10Hz|information will follow once sensor has been specified and output of it is clear||||||||||| diff --git a/doc/perception/specifcation.md b/doc/perception/specifcation.md new file mode 100644 index 0000000..24fd34f --- /dev/null +++ b/doc/perception/specifcation.md @@ -0,0 +1,858 @@ +| DOKUMENTATION | +|:--------------------------------------------------------| +| **Specification Input Data Format Perception Database** | +| | +| Art: Dokumentation/Bericht | +| | +| Ersteller: Maike Scholtes (ika) | +| | +| Version 1.3 | +| | +| Datum: 01.09.2021 | + +# Table of Contents + +[1. Introduction and Information on this Document](#introduction-and-information-on-this-document) + +[2. 
General Information](#general-information) + +[2.1 Sensor Coordinate System](#sensor-coordinate-system) + +[2.2 Ego Coordinate System](#ego-coordinate-system) + +[2.3 Reference Point](#reference-point) + +[2.4 Timestamps and Data Rate](#timestamps-and-data-rate) + +[2.5 Type](#type) + +[2.6 Compulsory Fields and non compulsory fields](#compulsory-fields-and-non-compulsory-fields) + +[2.7 Variances](#variances) + +[3. Top Level](#top-level) + +[3.1 formatVersion](#formatversion) + +[3.2 RecorderNumber](#recordernumber) + +[3.3 RecordingNumber](#recordingnumber) + +[3.4 converterVersion](#converterversion) + +[3.5 EgoID](#egoid) + +[3.6 EgoOffset](#egooffset) + +[3.7 CustomInformation](#custominformation) + +[4. Timestamps](#timestamps) + +[5. Object](#object) + +[5.1 Meta Information (Attributes)](#meta-information-attributes) + +[5.2 ID](#id) + +[5.3 birthStamp](#birthstamp) + +[5.4 Object/id...](#objectid) + +[5.4.1 Object Classification](#object-classification) + +[5.4.2 Confidence (of object classification)](#confidence-of-object-classification) + +[5.4.3 heading](#heading) + +[5.4.4 Movement Classification ](#movement-classification) + +[5.4.5 Confidence of Existence](#confidence-of-existence) + +[5.4.6 MeasState](#measstate) + +[5.4.7 RCS (val/valType)](#rcs-valvaltype) + +[5.4.8 Tracking Point](#tracking-point) + +[6. Sensor](#sensor) + +[6.1 Sensor Modality](#sensor-modality) + +[6.2 fusionInformation](#fusioninformation) + +[6.3 FirmwareVersion](#firmwareversion) + +[6.4 OriginalUpdateRate](#originalupdaterate) + +[6.5 SensorPos](#sensorpos) + +[6.6 Sensor Orientation](#sensor-orientation) + +[6.7 Range](#range) + +[6.8 VelocityRange](#velocityrange) + +[6.9 Field of View](#field-of-view) + +[6.10 Resolution](#resolution) + +[6.11 Accuracy](#accuracy) + +[6.12 Precision](#precision) + +[6.13 Track Timing Information](#track-timing-information) + +[7. Sensor Datasheet](#sensor-datasheet) + +[8. egoPosition](#egoposition) + +[9. Additional Sensors](#additional-sensors) + +## Versions + +| Name | Datum | Version | Kurzbeschreibung | +|:---------------|:-----------|:--------|:--------------------| +| Maike Scholtes | 25.06.2020 | V0.1 | Initial Document | +| Maike Scholtes | 05.11.2020 | V1.1 | Changing for some
signal names, introduction of
more groups and subgroups | +| Maike Scholtes | 18.03.2021 | V1.2 | Major updates
Restructuring of metainformation
Introduction of attributes
Error corrections | +| Maike Scholtes | 02.08.2021 | V1.2 | Update of documentation\
Additional information on
coordinate systems
| +| Maike Scholtes | 18.08.2021 | V1.3 | Version update,
see change log |
+
+# Introduction and Information on this Document
+
+This document goes hand in hand with the Excel document that provides the signal list for the Perception Database input format. All information can be found on Confluence: [link](https://confluence.vdali.de/x/b4B-/).
+
+This document explains the signals that are not self-explanatory (i.e. not all signals of the Excel list are mentioned here). Furthermore, it provides lookup tables wherever this is necessary to obtain a uniform format.
+
+# General Information
+
+Similar to the scenario input format, the data format chosen is **hdf5**. The format is structured into different parts. It starts with some general information on the recording scenario itself. The main information stored in this format is the information gathered by different sensors on detected objects (however, only one sensor modality per file). Within VVM these sensors will include radar, camera and lidar. The format works on an object-list basis, i.e. it uses data that has already passed through a perception algorithm and has undergone detection and tracking. Therefore, the format can also be used for fused object information from different sensor sources. The group 'object' contains the actual information on the different detected objects. Another main part consists of a detailed description of the specific sensors used ('sensor'). Some of this information can be gathered from the sensor data sheets; some might not be known.
+
+In general, the information provided in the perception database format is linked to the information provided in the scenario database. The scenario database functions as the ground truth reference data.
+
+In case of the AVL test vehicle, the ground truth, which is stored in the scenario database, is gathered from the sensors of the vehicle's top box and the labelling process the data undergoes. The data for the perception database (i.e. the data stored in the format described in this document) is gathered from the so-called sensors under test (SeUT) or perception under test (PeUT) that are installed around the vehicle.
+
+**While the PeUT, as described above, can consist of data from different sensor modalities (e.g. an object list of a lidar and an object list of a radar sensor), one separate file is needed per modality.** For instance, if one recording provides reference data, radar data and lidar data, the reference data would be saved in the scenario input format. The lidar and radar object data would be saved in two individual files using the perception format. I.e., the whole setup would produce three different files that can be uploaded into the individual databases. In case the perception data is gained from a fusion algorithm, this data is put into one perception file. The sensor group (see Section 6), however, can contain information on every single sensor used in the fusion.
+
+**Leave fields empty if you do not have the information. Do not put in zeros, as those can be mistaken for "real zeros".**
+
+## Sensor Coordinate System
+
+The sensor coordinate system is a coordinate system that has its origin in the point of installation of the sensor on the vehicle. It will not be used for the signals provided in the format.
However, if a value is +desired in the sensor coordinate system, it can be calculated by the use +of the desired signals and the mounting position of the sensor in the +vehicle coordinate system (see Section 2.2) provided in the Sensor +information (see Chapter 6). The sensor coordinate system is identical +to the vehicle coordinate system (DIN ISO 8855) with the +x-axis/longitudinal pointing in the viewing direction of the sensor, the +y-axis/lateral pointing to the left and z-axis pointing upwards, just +moved to a different origin (sensor mounting position). The heading +angle is defined in the mathematical positive sense of rotation and +given in degrees between 0 and 360 deg. It is zero when looking in the +direction of the x-axis. + +## Ego Coordinate System + +The ego coordinate system is a standard vehicle coordinate system (DIN +ISO 8855). It has its origin on the rear axis, laterally in the center +of the vehicle and on the ground. The ego vehicle is the vehicle +recording the perception data. It is used for the object data in this +format. The heading is defined in mathematical positive sense of +rotation and given in degrees between 0 and 360 deg. It is zero when +looking in the direction of the x-axis. + +Please note the difference between the coordinate system used in the +scenario database format and the perception database format. In the +scenario database format an UTM based coordinate system is used with +heading angle zero in eastern direction. In the perception database the +ego coordinate system, which is a vehicle coordinate system, is used. +Figure 1 shows the ego coordinate system used in the perception database +along with a detected sensor object (green) as well as the coordinate +system used for the scenario database (black axis labelled UTM). + +![](images/image_0.png) + +Figure 1 - Different coordinate systems of perception and scenario +format + +## Reference Point + +All target objects described in the ‚Object' part of this input format +are described in their center and in the ego coordinate system. This +means that all measured data gathered by the different sensors must be +adapted so that the point provided is the center of the detected vehicle +in the ego vehicle coordinate system. + +## Timestamps and Data Rate + +The format uses, similarly to the scenario database input format, a +common timestamp vector. This means that all signals provided in the +perception database input format are synchronized with a common +timestamp. Furthermore, since the scenario database and the perception +database are coupled, the data rate, i.e., timestamp used in the +perception database needs to be equivalent with the one in the scenario +database input format. Precisely this means that when using the AVL test +vehicle and providing the ground truth with 10 Hz the sensor data also +needs to be provided with 10 Hz. For other data suppliers the usual +frame rate is 25 Hz. This also means that the timestamp vector in the +perception database format and the scenario database format are +equivalent in step size and entries. However, the perception timestamps +vector might start later than the scenario timestamps vector or only +contains a subset of the scenario timestamps vector, since the +perception recording might start at a different point in time. + +## Type + +The type field is provided for nearly every field in the object +category. 
It needs to be used in order to provide information on whether +the signal provided is actually measured by the sensor or if the +information has been calculated through other given information. +Furthermore, in this field it can be noted down if the information is +not provided at all (please note section 2.6 for more information on +compulsory and non compulsory fields). Considering the use case of the +perception database the original output of a sensor should always be +used if available. + +The lookup table used for all signals of the type 'type' is as follows: + +- Not provided = 0 + +- Measured = 1 + +- Determined = 2 + +## Compulsory Fields and non compulsory fields + +All sensors should at least provide: + +- Distances in longitudinal and lateral direction (z only if given) + +- Velocities in longitudinal and lateral direction (absolute and + relative) + +- Acceleration in longitudinal and lateral direction (absolute and + relative) + +In general, the same format is used for every sensor no matter which +type or model. The format always has the same signals. They do not +change. Therefore, it contains signals that cannot be provided by some +sensor modalities. If a sensor does not provide a signal, this is noted +down in the type field of that value (compare section 2.5). The field +for the actual value can contain any number if it is set to not provided +through the type field. However, using an empty field is advisable. + +Regarding the sensor information, any information that you have on the +specification of your sensor / your perception algorithm is useful. +Please try to gather as much information as possible. Under any +circumstances needed is information on the field of view. I.e., the +minimum information you need to provide is the range and the horizontal +opening angle. + +## Variances + +Variances can be noted down for most signals that are potentially +measured by the sensor and provided as output through the sensor. If the +sensor provides a variance for a signal, the value can be put in the +variance field for that signal. If it does not provide the variance or +the variance is determined through measured values, but not measured +itself, this can be noted down in the 'typeVarXXX' field for each +signal. + +# Top Level + +## formatVersion + +Version number of the input format used. Please provide number in the +following format: "x.y" + +In order to connect the scenario file (DGT) and the perception data +information on the filename of the GT for which the perception data is +recorded needs to be provided. The unique filename consists out of a +recorderNumber and a recordingNumber. + +## RecorderNumber + +Partner numbers/identifiers are generated on request. For now we are +certain that AVL and ika will deliver data. Therefore, those two are +already assigned a number below + +- 1: AVL + +- 2: ika + +## RecordingNumber + +The numbers of the individual recordings can be chosen by the partner +providing the data. It is important that those numbers are unique and +that the ground truth recording and the corresponding sensor under test +recording from the same drive get the same number. + +## converterVersion + +Since with the new findings in the project regarding the perception data +the entity recording the data is not necessarily the one converting the +data, the converter number should not only indicate with which version +of a converter the data is converted, but also indicate which entity did +the conversion. 
+ +Version number of the converter used in the format version „x.y". Along +with the hdf5 data a short documentation on the converter used to +convert the data into the format needs to be provided. The documentation +should show which methods and algorithms were used to generate the data +on the basis of which original data. + +For now there are two entities known converting data. Those are already +assigned numbers below: + +- 1: Valeo + +- 2: ika + + This means the full converter number would look like the following + in case valeo is converting: + + 1-x.y + + Or like this if ika converted the data: + + 2-x.y + +## EgoID + +The egoID is also used to couple the scenario and the perception +database. The ground truth contains information on every actor within a +scenario. For the evaluation it is important to know which of these +actors is the vehicle equipped with the perception sensors. The id of +this vehicle in the scenario database needs to be provided here. +Together with the coupling with the filename, the allocation is +unambiguous. + +## EgoOffset + +Offset between center point of ego vehicle and its rear axle in meters +measured in x direction of the vehicle coordinate system. Value is +needed to transform from the global coordinate system (reference) into +the vehicle coordinate system. + +## CustomInformation + +This field can be used, if necessary, if there is any information you +want to give the data analyst that does not fit into any of the provided +fields. The information can be provided here in writing. An example for +such an use case form real life is: The sensor you used did not provide +a tracking for some reason. In order to use this format, however, you +need tracking since the objects are provided in individual folders +labelled by their id. Therefore, you have implemented your own post +processing tracking. You could note this in the customInformation field +so that the data analyst knows that this tracking is not one originally +provided by the sensors algorithms. + +# Timestamps + +Similar to the scenario database input format, the format at hand has +one consistent timestamp vector. All signals that are time dependent +(such as distance, velocity, acceleration etc.) are provided in vectors +where the value in the vector at a certain position corresponds to the +timestamp at the same position in the timestamp vector. + +The timestamp vector of the perception database input format is +equivalent to the timestamp vector in the scenario database input format +of one particular recording, since the two measurements are synched and +feature the same data rate. + +# Object + +The object type contains general information that should be given by +each sensor or is calculable by the given information and more specific +fields that can only be filled depending on the sensor type. Objects are +the objects of the object lists provided by the sensors (after +detection, clustering, tracking etc.). + +The object group contains all meta information described in the +following section on the top level. Furthermore, all time dependent +information as well as the birthStamp of each object is structured in +individual groups labelled with the object id. + +The structure is as follows: + +- Object + +- Metainformation (distLongitudinalValType, distLongitudinalVarType + ... etc.) + +- 0 + + - birthStamp + + - distLongitudinal + + - val + + - var + + - distLateral + + - val + + - var + + - ... + +- 1 + + - ... + +- 2 + + - ... + +- ... 
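+
+To illustrate the nesting listed above, a minimal, non-normative Python/h5py reading sketch is given below. The file name and variable names are placeholders; the group, dataset and attribute names follow the signal list:
+
+```python
+import h5py
+
+with h5py.File("perception.hdf5", "r") as f:  # placeholder file name
+    timestamps = f["timestamps"][:]            # common time vector, seconds since start of recording
+    objects = f["object"]
+
+    # meta information: attributes on the top level of the 'object' group
+    dist_long_val_type = objects.attrs.get("distLongitudinalValType")  # 0/1/2, see Section 2.5
+
+    for obj_id, obj in objects.items():        # one sub-group per tracked object id ("0", "1", ...)
+        birth = int(obj.attrs["birthStamp"])   # index into 'timestamps' at which the object appears
+        dist_long = obj["distLongitudinal/val"][:]
+        dist_long_var = obj["distLongitudinal/var"][:]
+        print(obj_id, "first seen at t =", timestamps[birth], "s,",
+              len(dist_long), "longitudinal distance samples")
+```
+
+Writing follows the same layout with h5py's `create_group`, `create_dataset` and `attrs`.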
+ +## Meta Information (Attributes) + +This group contains all meta information through attributes in the hdf5 +file. This meta information includes information on whether individual +data fields are calculated or measured directly by the sensor. The +information is contained in fields labeled in the following fashion: +informationXValType and informationXVarType. The fields labeled with +ValType refer to the actual value. Is it calculated or measured or not +provided at all. The field with the label VarType refer in the same way +to the variance fields. For instance, "distLongitudinalValType" contains +the information if the distance in longitudinal direction (in ego +coordinate system) is measured or calculated through other values. In +the same way "distLongitudinalVarType" contains this information for the +variance. + +The lookup list how to indicate not provided, calculated, measured etc. +can be found in Section 2.5. + +This meta information is given once as attributes in the top level of +the group object. It is the same for all detected objects. + +## ID + +Each object has one id. This id stays the same as long as the object is +tracked. The id is given indirectly through the position of the object +in the file. + +## birthStamp + +Comparable to the scenario description format, each object has a +birthStamp. This indicates at which point in the time stamp vector the +object appeared. I.e. if the birthStamp is 10, the object exists from +the point in time noted down at position 10 in the timestamps vector. + +## Object/id... + +The time dependent values per object (blue color in signal list, all +following entries of section 5) are contained in a group labelled with +the id of that specific object. For instance, +*object/0/distLongitudinal/val* for the longitudinal distance of object +with the id 0 etc. + +Not all fields of the signal list are discussed in the following. The +ones that are unique such as distance and velocity are left out. + +### Object Classification + +Different sensors can provide different classifications for the detected +objects. The used terms and the ability of the sensor can differ. The +list below provides different vehicle types as well as some not as +precise information. The information given by the sensor needs to be +matched to the terms presented in the lookup table below: + +- NO_INFO = 0 + +- CAR = 1 + +- TRUCK = 2 + +- MOTORCYCLE = 3 + +- PEDESTRIAN = 4 + +- BICYCLE = 5 + +- UNKNOWN = 15 + +- UNKNOWN_BIG = 14 + +- UNKNOWN_SMALL = 13 + +- SMALLER_THAN_CAR = 12 + +- BIGGER_THAN_CAR = 11 + +### Confidence (of object classification) + +Provide the object classification confidence in case the sensor outputs +this information (e.g. Ibeo Scalar). The higher the number, the larger +the certainty. Note in the objectClassificationConfidenceValType field +if this information is not provided. + +### heading + +Some sensors are able to provide the heading angle of the target. If +this is the case, it can be noted down here. If not, please make this +clear by providing a '0' in the valTypeHeading field. Same can be done +for the variance of the target heading if not provided. Note that the +heading is given in the ego coordinate system. + +### Movement Classification + +Some sensors provide a classification of the movement of the target. +E.g. this can describe if the object is stationary or crossing etc. 
A +list as a look up table to provide the correct integer in the input +format is provided below: + +- 0 = no information + +- 1 = unknown + +- 2 = moving + +- 3 = stationary + +- 4 = oncoming + +- 5 = crossingMoving + +- 6 = crossingStationary + +- 7 = stopped + +Note that no information should be used if the sensor does not give this +information (you could also provide a '0' in the +movementClassificationValType field) and unknown if the sensor in theory +provides this information, but does not have it in this particular case. +One could also choose to enrich this information afterwards through the +distance and velocity information measured by the sensor. If this is +done this absolutely needs to be marked in the +movementClassificationValType field by providing a '2'. + +### Confidence of Existence + +Some sensor provide the probability by which they believe the detected +object does exist (or is a false positive). The probability can be given +in percent. + +If the sensor provides the probability separated into several groups, +but no exact percentage, this can be transformed into a percentage. E.g. +a sensor provides a '1' if the probability of existence is \< 25%. If +this is the case use a 25 in the input format. + +Please note that this is the probability of existence, if the sensor +provides the probability for a false detection, this needs to be +transformed correspondingly. + +If the probability is not given at all this is noted in +confidenceOfExistenceValType. + +### MeasState + +The measurement state is provided by some sensors as a result of the +tracking process. It, e.g. describes if an object is newly detected, if +the current state is calculated through an old state, because no current +measurement is available and so on. The lookup table for the different +states is provided below: + +- Unknown/not given = 0 + +- Deleted = 1 + +- New Object = 2 + +- Measured (Object has been confirmed by measurements) = 3 + +- Predicted (Object state could not be confirmed by measurements and + was predicted) = 4 + +- Deleted from merge (Object was deleted as it was merged with another + object) = 5 + +- New from merge (New object created after a merge) = 6 + +If this information is not provided by the sensor this can be noted in +the measStateValType field through a '0'. + +### RCS (val/valType) + +In case the sensor is a radar sensor, some provide the radar cross +section (rcs) along with the object. Please note that the sensors +usually provide the cross section combined with the dB value, rather +than just square meters. + +### Tracking Point + +Lidar sensors can provide which tracking point (feature) of an object +has been detected. This can be imagined as a 3x3 grid on top of the +vehicle, i.e. describing all four corners, the centers of the edges and +the center of the vehicle. Even though the object data described in +[Chapter 5](#object) uses the +center of the vehicle as a reference point, for certain evaluations it +can become important to know from which feature the lidar sensor has +calculated this information. 
The different points in the grid can be referenced by integer values found in the following lookup table:
+
+
+Source: https://unsplash.com/photos/YU9rbNLQxSY
+
+- 0 = unknown
+
+- 1 = front right corner
+
+- 2 = center of front edge
+
+- 3 = front left corner
+
+- 4 = center of left edge
+
+- 5 = center of vehicle
+
+- 6 = center of right edge
+
+- 7 = rear left corner
+
+- 8 = center of rear edge
+
+- 9 = rear right corner
+
+Note that 'unknown' should only be used if the sensor in theory has the possibility to perform feature detection, but did not provide the information for one or more timestamps. If the sensor is not able to provide this information at all, this can be noted down by using a '0' in the field 'trackingPointValType'.
+
+# Sensor
+
+The sensor type contains additional information on the sensor used. The information provided should be as detailed as possible to enable a meaningful analysis. **If a piece of information is not known, provide an empty field in case of strings and '999.0' for all other unknown fields**. **Information that contributes to the field of view (range and at least the horizontal angle) is compulsory, as is the sensor's mounting position information**. Without this information a meaningful analysis would be impossible.
+
+All information is saved as metadata (attributes in hdf5). The information is saved as follows:\
+*sensor/id/...*\
+Each sensor used for the perception file is contained and identifiable through its id. I.e., the sensor group can contain a list of different sensors. This can be necessary if the perception data is gathered by a sensor featuring different modes for long range, short range etc. or a sensor that has different properties for the front and rear direction. Theoretically this is also possible in case of a sensor fusion. One could provide all sensors used for the fusion. However, since the fusion algorithm usually has individual specifications regarding resolution etc., it might be wise to provide the complete fusion perception as one "sensor" even though it technically is not one physical sensor. In case you decide to do so, you can use the field fusionInformation to note down the involved sensor modalities of the fusion as a list.
+
+Please note that you need to provide separate files of perception data for the different sensor modalities. I.e. there will be one perception file for the camera data and one separate file for the radar data recorded during the reference recording X.
+
+If the actual sensor recording and the perception information differ, e.g. when using an offline perception algorithm, provide the information for the perception algorithm. Always provide the specifications for the data that is contained in the file.
+
+## Sensor Modality
+
+- Lidar: 1
+
+- Camera: 2
+
+- Radar_SR: 3
+
+- Radar_MR: 4
+
+- Radar_LR: 5
+
+- Fusion: 6
+
+## fusionInformation
+
+In case the sensor modality is a fusion, it can be wise to note down the sensor modalities that were used for the fusion, e.g. a fusion of radar and camera data. This can be done using this signal. Just provide the involved sensor modalities as strings, e.g. "radar camera". If no fusion is used this field can be left blank.
+
+## FirmwareVersion
+
+The number of the firmware running on the sensor used.
+
+## OriginalUpdateRate
+
+The original update rate the sensor comes with, e.g. 40 Hz or 10 Hz. The analyst then knows if the data has been up- or downsampled to 10 Hz / 25 Hz.
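+
+Since all of this sensor information is stored as hdf5 attributes below *sensor/id/...*, the following non-normative h5py sketch shows how such metadata could be read back. The file name is a placeholder; the attribute names follow the signal list:
+
+```python
+import h5py
+
+with h5py.File("perception.hdf5", "r") as f:  # placeholder file name
+    for sensor_id, sensor in f["sensor"].items():       # one group of attributes per sensor id
+        modality = sensor.attrs["sensorModality"]        # lookup table above: 1 = Lidar ... 6 = Fusion
+        mounting = (sensor.attrs["sensorPosLongitudinal"],
+                    sensor.attrs["sensorPosLateral"],
+                    sensor.attrs["sensorPosZ"])          # metres, ego/vehicle coordinate system
+        fov_horizontal = sensor.attrs["foVHorizontal"]   # full horizontal opening angle, degrees (compulsory)
+        max_range = sensor.attrs["maxRange"]             # metres (compulsory)
+        print(sensor_id, modality, mounting, fov_horizontal, max_range)
+```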
+ +## SensorPos + +The sensor position within the vehicle coordinate system is provided +through three values: sensorPosLongitudinal, sensorPosLateral and +sensorPosZ. Those values describe the mounting position of the sensor +and are necessary in order to transform data from the vehicle coordinate +system into the sensor coordinate system. sensorPosLongitudinal contains +the value along the x-axis of the vehicle coordinate system, +sensorPosLateral the value along the y-axis and sensorPosZ along the +z-axis. + +## Sensor Orientation + +Orientation of the sensor in the vehicle coordinate system. Please note that the +torsion of the sensor is given in the vehicle coordinate system, which is +not rotated itself. Therefore, the order of applying the different +angles does not matter. However, it is important to first apply the +shift of origin given in SensorPos and then proceed with the torsion. +Angles are given in degrees. + +## Range + +Min and max range describe the minimum and maximum range in which the +sensor can detect objects. + +## VelocityRange + +Min and max value of the velocity the sensor can detect. + +## Field of View + +The field of view can be provided in terms of opening angles +horizontally and vertically. The horizontal field of view should be +known for each sensor. The field of view can be provided by a value in +degrees. The field of view should be provided in full, i.e. the full opening angle +is given and is applied at the provided position of the sensor equally +to the left and right hand side. + +## Resolution + +The resolution describes from which value upwards the sensor is able to +differentiate between two potential objects. This value can be provided +for different distances (vertical and horizontal/range), for different +angles (vertically and horizontally) and different velocities. + +## Accuracy + +Accuracy and precision are often not precisely differentiated. According +to definition, accuracy would be a combination of trueness and precision +(see Figure 2). However, this is often not differentiated and the term +accuracy is used as a synonym for trueness. Therefore, it is sometimes +not known if the value provided in a data sheet is the value for +accuracy, trueness or the one for precision. + +When being very precise, the term accuracy actually describes the +combination of trueness and precision. Similar to most datasheets from +manufacturers we stick with the term accuracy here even though trueness +would be more precise (in most cases; some manufacturers simply do not +give this information). It describes how correct a measurement is, i.e., +how far it is off. + +## Precision + +As described under Accuracy, the two terms are often not clearly +differentiated, so it is sometimes not known if the value provided in a +data sheet refers to accuracy, trueness or precision. + +If a measurement is repeated several times, a sensor with high precision +should give a very similar result each time. Those measurements +can be far off from the ground truth and still have a high +precision, but not a high trueness. + +![](images/image_2.png) + +Figure 2 - Accuracy - Combination of trueness and precision \[Source: +[link](https://en.wikipedia.org/wiki/Accuracy_and_precision)\]
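
To make the distinction concrete, here is a small numerical illustration with made-up repeated range measurements: the systematic offset from the ground truth corresponds to (a lack of) trueness, while the spread of the repeated measurements corresponds to precision.

```python
import numpy as np

ground_truth = 50.0                                        # true distance to the target in m
measurements = np.array([50.8, 50.9, 51.0, 50.9, 51.1])    # made-up repeated measurements in m

offset = measurements.mean() - ground_truth                # systematic error -> poor trueness (~0.94 m)
spread = measurements.std(ddof=1)                          # repeatability    -> high precision (~0.11 m)

print(f'offset (trueness error): {offset:.2f} m, spread (precision): {spread:.2f} m')
```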
+ +## Track Timing Information + +TrackConfirmationLatency contains information on how long the sensor's +internal tracking takes until it outputs an object. +TrackDropLatency provides information on how long the sensor's internal +tracking will still output an object even though it has actually +disappeared. + +## maxObjectTracks + +The field maxObjectTracks contains the maximum number of objects the +sensor's algorithms can track, if that is known. + +# Sensor Datasheet + +Along with the information given in the input format, each data provider +should provide the data sheets of the sensors used for the recording. + +# egoPosition + +The ego position fields can be filled with GNSS data or other +self-determined positioning data. It contains longitude (east), latitude +(north) and elevation information as well as heading, yaw rate and +pitch. The longitude and latitude information and the elevation +information as well as the heading can be enriched with information on +their variance. Longitude and latitude can be provided in their +absolute form (no need to provide them relative to an absolute point as in the +reference format). The angle definition of the coordinate system follows +the same rules as in the input format for the scenario database. The +heading angle is equal to zero when looking in the direction of the +longitude axis (east). It is defined in the mathematical positive sense +of rotation, given in degrees and between 0 and 360 degrees. The pitch +angle is provided in the ego coordinate system / vehicle coordinate +system. It is also given in the mathematical positive sense, i.e., it is +positive when stopping. It is also provided in degrees from 0 to +/-180. + +# Additional Sensors + +Signals for additional sensors might be added in the future, once it has +been decided within this project if such sensors are used and what data +they can deliver. At the moment, discussions are taking place for +acoustic sensors and light intensity sensors.
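
Relating to the egoPosition section above: absolute longitude/latitude values can be derived from a reference point and local UTM offsets with pyproj, analogous to the approach used by the artificial-perception converter in this repository. The reference point and offsets below are made-up example values.

```python
import numpy as np
import pyproj

# Transform between geographic coordinates and UTM zone 32N (EPSG:25832),
# matching the transformer setup used in omega_format's perception converter.
transformer = pyproj.Transformer.from_crs(pyproj.CRS('EPSG:25832').geodetic_crs,
                                          pyproj.CRS('EPSG:25832'))

ref_lat, ref_lon = 50.786, 6.046                      # hypothetical refPointLat / refPointLong
ref_x, ref_y = transformer.transform(ref_lat, ref_lon)

pos_x = np.array([0.0, 1.2, 2.5])                     # local x offsets in m (east)
pos_y = np.array([0.0, 0.4, 0.9])                     # local y offsets in m (north)

lat, lon = transformer.transform(ref_x + pos_x, ref_y + pos_y, direction='INVERSE')
print(lat, lon)                                       # absolute posLatitude / posLongitude values
```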
diff --git a/doc/signal_list_reference.md b/doc/signal_list_reference.md index b5e42c1..a3c38c0 100644 --- a/doc/signal_list_reference.md +++ b/doc/signal_list_reference.md @@ -5,12 +5,12 @@ | _posX_ | _Polyline, Trajectory_ | - | - | - | _Measured_ | _X coordinate in polyline_ | _double (Relativ to Lat/Long reference point)_ | _m_ | _0,2 m_ | _25 Hz / 10Hz_ | - | | _posY_ | _Polyline, Trajectory_ | - | - | - | _Measured_ | _Y coordinate in polyline_ | _double (Relativ to Lat/Long reference point)_ | _m_ | _0,2 m_ | _25 Hz / 10Hz_ | - | | _posZ_ | _Polyline, Trajectory_ | - | - | - | _Measured_ | _Z coordinate in polyline (elevation)_ | _double (Relativ to Lat/Long reference point)_ | _m_ | _0,2 m_ | _25 Hz / 10Hz_ | - | -| _heading_ | _Trajectory_ | - | - | - | _Measured_ | _Heading angle of object_ | _double_ | _rad_ | _0,035 rad (2°)_ | _25 Hz / 10Hz_ | - | -| _pitch_ | _Trajectory_ | - | - | - | _Measured_ | _Pitch angle of object_ | _double_ | _rad_ | _0,035 rad (2°)_ | _25 Hz / 10Hz_ | - | -| _roll_ | _Trajectory_ | - | - | - | _Measured_ | _Roll angle of object_ | _double_ | _rad_ | _0,035 rad (2°)_ | _25 Hz / 10Hz_ | - | -| _headingDer_ | _Trajectory_ | - | - | - | _Measured_ | _Heading rate_ | _double_ | _rad/s_ | _0,035 rad/s_ | _25 Hz / 10Hz_ | - | -| _pitchDer_ | _Trajectory_ | - | - | - | _Measured_ | _Pitch rate_ | _double_ | _rad/s_ | _0,035 rad/s_ | _25 Hz / 10Hz_ | - | -| _rollDer_ | _Trajectory_ | - | - | - | _Measured_ | _Roll rate_ | _double_ | _rad/s_ | _0,035 rad/s_ | _25 Hz / 10Hz_ | - | +| _heading_ | _Trajectory_ | - | - | - | _Measured_ | _Heading angle of object_ | _double_ | _deg_ | _0,035 rad (2°)_ | _25 Hz / 10Hz_ | - | +| _pitch_ | _Trajectory_ | - | - | - | _Measured_ | _Pitch angle of object_ | _double_ | _deg_ | _0,035 rad (2°)_ | _25 Hz / 10Hz_ | - | +| _roll_ | _Trajectory_ | - | - | - | _Measured_ | _Roll angle of object_ | _double_ | _deg_ | _0,035 rad (2°)_ | _25 Hz / 10Hz_ | - | +| _headingDer_ | _Trajectory_ | - | - | - | _Measured_ | _Heading rate_ | _double_ | _deg/s_ | _0,035 rad/s_ | _25 Hz / 10Hz_ | - | +| _pitchDer_ | _Trajectory_ | - | - | - | _Measured_ | _Pitch rate_ | _double_ | _deg/s_ | _0,035 rad/s_ | _25 Hz / 10Hz_ | - | +| _rollDer_ | _Trajectory_ | - | - | - | _Measured_ | _Roll rate_ | _double_ | _deg/s_ | _0,035 rad/s_ | _25 Hz / 10Hz_ | - | | _velLongitudinal_ | _Trajectory_ | - | - | - | _Measured_ | _Velocity in longitudinal direction (vehicle coordinate system)_ | _double_ | _m/s_ | _0,1 m/s_ | _25 Hz / 10Hz_ | - | | _velLateral_ | _Trajectory_ | - | - | - | _Measured_ | _Velocity in lateral direction (vehicle coordinate system)_ | _double_ | _m/s_ | _0,1 m/s_ | _25 Hz / 10Hz_ | - | | _velZ_ | _Trajectory_ | - | - | - | _Measured_ | _Velocity in z-direction (vehicle coordinate system) | _double_ | _m/s_ | _0,1 m/s_ | _25 Hz / 10Hz_ | - | @@ -30,6 +30,8 @@ | recorderNumber | - | - | - | yes | Determined | Number assigned to the partner performing the recording. Together with recorder number unique filename to connect ground truth recordings with the corresponding sensor under test recordings | int | - | - | - | Same name can be found in perception input format. | | recordingNumber | - | - | - | yes | Determined | Number of recording. Together with recorder number unique filename to connect ground truth recordings with the corresponding sensor under test recordings | int | - | - | - | Same name can be found in perception input format. 
| | converterVersion | - | - | yes | Determined | Can be filled with version of tool to merge road data, dynamic objects and weather data | string | - | - | - | - | +| referenceModality | - | - | - | yes | Determined | Include information on the method used to gather the reference data | int | - | - | - | Lookup table will be provided | +| customInformation | - | - | - | yes | Determined | Include custom information that you want to be contained in the file but does not fit in any other signal | string | - | - | - | - | | naturalBehavior | - | - | - | yes | Determined | Flag to save if the measurement is part of a planned snippet scenario or naturalistic driving data | bool | - | - | - | Needed in order to determine if driving was naturalistic | | naturalExposure | - | - | - | yes | Determined | Flag to save wether this recording reflects true exposure. Set to false if data has been preprocessed to find "interesting" scenario of some form | bool | - | - | - | Needed in order to calculate true exposure of scenarios | | daytime | - | - | - | yes | Measured | Exact date and time at beginning of recording | string | - | - | - | Timestamps are only seconds, therefore date and time at beginning needed. Rest can be calculated by summation. String format: yymmddhhmmss | @@ -100,7 +102,7 @@ | connectedTo | road/{id} | sign/{id} | - | no | Measured /Enriched | To which other traffic sign is this sign connected | List of id tuples | - | - | - | most relevant for traffic lights or additional signs such as steep curve in combination with speed limit signs | | fallback | road/{id} | sign/{id} | - | yes | Measured /Enriched | Flag if this sign is only a fallback, e.g., for yield signs at traffic lights if traffic light is broken | bool | - | - | - | - | | polyline | road/{id} | sign/{id} | - | no | Measured /Enriched | Position of the traffic sign in x,y,z | double (One polyline point posX, posY, posZ) | - | 0,2 m | - | fields for posX, posY and posZ | -| heading | road/{id} | sign/{id} | - | yes | Measured /Enriched | Rotation of traffic sign around its height axis | double | rad | - | - | - | +| heading | road/{id} | sign/{id} | - | yes | Measured /Enriched | Rotation of traffic sign around its height axis | double | deg | - | - | - | | overrides | road/{id} | sign /{id} | - | no | Determined | Relevant if an existing object in the DGT is modified (Layer 3 Information). ID of original object that has been modified can be provided here | id tuple | - | - | - | Allows to use same type definitions for layer 1,2 and 3. Modifications on existing objects can be made visible. Objects are crossreferenced | | overriddenBy | road/{id} | sign/{id} | - | no | Determined | Relevant if an existing object in the DGT is modified (Layer 3 information). ID of the modified object can be provided here | id tuple | - | - | - | Allows to use same type definitions for layer 1,2 and 3. Modifications on existing objects can be made visible. Objects are crossreferenced | | layerFlag | road/{id} | sign/{id} | - | yes | Determined | Note down if information is temporary | int | - | - | - | - | diff --git a/doc/specification.md b/doc/specification.md index f3b19a6..c631487 100644 --- a/doc/specification.md +++ b/doc/specification.md @@ -8,13 +8,12 @@ Art: Dokumentation/Bericht

Ersteller: Maike Scholtes (ika) --- -Version 3.0 +Version 3.1 --- Gefördert durch:
*[Logo: BMWI]* *[Logo: VDA]* -## Inhaltsverzeichnis **Versionen** @@ -37,6 +36,132 @@ Gefördert durch:
*[Logo: BMWI]* *[Logo: VDA]* | Maike Scholtes | 02.12.2020 | V2.4 | Added “Sinnbilder” for markings | | Maike Scholtes | 17.03.2021 | V2.5 | Added different converter numbers.
Made changes to what needs to be provided instead if something can not be provided. | | Maike Scholtes | 23.03.2021 | V3.0 | Introduction of attributes in hdf5 for scalar metainformation | +| Maike Scholtes | 18.08.2021 | V3.0 | Provide information on the direction of the pitch angle.
Clarify information on the heading angle and on which signals are provided in utm coordinate systems
and which in the vehicle coordinate system.
Included Image of global coordinate system. | +| Maike Scholtes | 10.09.2021 | V3.1 | Added customInformation and referenceModality field. | + +- [Specification OMEGA Format - Reference + Data](#specification-omega-format---reference-data) +- [Table of Contents](#table-of-contents) + - [1. Introduction and Information on this + Document](#1-introduction-and-information-on-this-document) + - [Introduction and Information on this + Document](#introduction-and-information-on-this-document-1) + - [Used Input](#used-input) + - [2. General Information](#2-general-information) + - [2.1 Attributes](#21-attributes) + - [2.2 Lookup Tables](#22-lookup-tables) + - [2.3 IDs](#23-ids) + - [2.4 Temporary Modifications](#24-temporary-modifications) + - [2.5 Mark something as invalid or + empty](#25-mark-something-as-invalid-or-empty) + - [2.6 Polyline](#26-polyline) + - [2.7 Timestamps](#27-timestamps) + - [2.8 Coordinate System](#28-coordinate-system) + - [2.9 Trajectory](#29-trajectory) + - [3. Attributes on Top Level](#3-attributes-on-top-level) + - [3.1 formatVersion](#31-formatversion) + - [3.2 RecorderNumber](#32-recordernumber) + - [3.3 RecordingNumber](#33-recordingnumber) + - [3.4 converterVersion](#34-converterversion) + - [3.5 referenceModality](#35-referencemodality) + - [3.6 customInformation](#36-custominformation) + - [3.7 Natural Behavior](#37-natural-behavior) + - [3.8 Natural Exposure](#38-natural-exposure) + - [3.9 Reference Point + (refPointLat/refPointLong)](#39-reference-point-refpointlatrefpointlong) + - [3.10 Daytime](#310-daytime) + - [4. Road](#4-road) + - [4.1 converterVersion](#41-converterversion-1) + - [4.2 Location](#42-location) + - [4.3 Border](#43-border) + - [5 Lane](#5-lane) + - [5.1 Type](#51-type) + - [5.2 Sub Type](#52-sub-type) + - [5.3 Class](#53-class) + - [5.4 BorderRight/Left](#54-borderrightleft) + - [5.5 invertedRight/Left](#55-invertedrightleft) + - [5.6 Predecessor/Successor](#56-predecessorsuccessor) + - [6. Boundary](#6-boundary) + - [6.1 Type](#61-type-1) + - [6.2 Sub Type](#62-sub-type-1) + - [6.3 Right](#63-right) + - [6.4 + polyIndexStart/polyIndexEnd](#64-polyindexstartpolyindexend) + - [6.5 Height](#65-height) + - [6.6 Color](#66-color) + - [6.7 Condition](#67-condition) + - [7. Sign](#7-sign) + - [7.1 Type](#71-type-2) + - [7.2 Value](#72-value) + - [7.3 Size Class](#73-size-class) + - [7.4 History](#74-history) + - [7.5 + Timedependent/Weatherdependent](#75-timedependentweatherdependent) + - [7.6 Applicable Lanes](#76-applicable-lanes) + - [7.7 Connected To](#77-connected-to) + - [7.8 Fallback](#78-fallback) + - [7.9 Polyline](#79-polyline-1) + - [7.10 Heading](#710-heading-1) + - [8. Flat Marking](#8-flat-marking) + - [8.1 Type](#81-type-3) + - [8.2 Value](#82-value-1) + - [8.3 Polyline](#83-polyline-2) + - [8.4 Color](#84-color-1) + - [8.5 Condition](#85-condition-1) + - [9. Lateral Marking](#9-lateral-marking) + - [9.1 Type](#91-type-4) + - [9.2 Polyline](#92-polyline-3) + - [9.3 LongSize](#93-longsize) + - [9.4 ApplicableLanes](#94-applicablelanes) + - [9.5 Color](#95-color-2) + - [9.6 Condition](#96-condition-2) + - [10. 
Surface](#10-surface) + - [10.1 Material](#101-material) + - [10.2 Color](#102-color-3) + - [10.3 Condition](#103-condition-3) + - [11 Road Object](#11-road-object) + - [11.1 Type](#111-type-5) + - [11.2 Polyline](#112-polyline-4) + - [11.3 Height](#113-height-1) + - [11.4 Driveable/walkable](#114-driveablewalkable) + - [12 Structural Object](#12-structural-object) + - [12.1 Type](#121-type-6) + - [12.2 Polyline](#122-polyline-5) + - [12.3 Height](#123-height-2) + - [13 Road User](#13-road-user) + - [13.1 converterVersion](#131-converterversion-2) + - [13.2 Type](#132-type-7) + - [13.3 Sub Type](#133-sub-type-2) + - [13.5 birthStamp](#135-birthstamp) + - [13.6 Trajectory](#136-trajectory-1) + - [13.7 isDataRecorder](#137-isdatarecorder) + - [13.8 Bounding Box](#138-bounding-box) + - [13.9 Vehicle Lights](#139-vehicle-lights) + - [14. Misc Object](#14-misc-object) + - [14.1 Type](#141-type-8) + - [14.2 Sub Type](#142-sub-type-3) + - [14.3 Trajectory](#143-trajectory-2) + - [14.4 birthStamp](#144-birthstamp-1) + - [14.5 Bounding Box](#145-bounding-box-1) + - [15. State](#15-state) + - [15.1 ReferenceID](#151-referenceid) + - [15.2 Value](#152-value-2) + - [16. weather](#16-weather) + - [16.1 converterVersion](#161-converterversion-3) + - [16.2 Source](#162-source) + - [16.3 Weather Sation ID](#163-weather-sation-id) + - [16.4 Precipitation](#164-precipitation) + - [16.5 Wind](#165-wind) + - [16.6 Gust of Wind](#166-gust-of-wind) + - [16.7 Cloudiness](#167-cloudiness) + - [16.8 Road Condition](#168-road-condition) + - [16.9 Temperature](#169-temperature) + - [16.10 Humidity](#1610-humidity) + - [16.11 Air Pressure](#1611-air-pressure) + - [16.12 Visibility](#1612-visibility) + - [16.13](#1613-section) + - [17. Literaturverzeichnis](#17-literaturverzeichnis) + ## 1. Introduction and Information on this Document @@ -113,6 +238,8 @@ The whole description features two coordinate systems. One is the main, absolute All positions are described relative to this one absolute point and its UTM coordinate system. This is the case for the complete description of the road and its components as well as for the position of road users and misc objects. +Global coordinate system + **For such objects, however, velocity and acceleration are described in the vehicle coordinate system (ISO 8855, see Figure 1).** This allows for defining velocity and acceleration in longitudinal and lateral (in the sense of automotive engineering not geodetically) direction (z-direction is constant). The coordinate systems can be transformed into each other using the heading angle. Please note that in the geodetical coordinate system the “east-west axis” (x-axis) points to the right while in the automotive coordinate system (y-axis) it points to the left. Vehicle coordinate system @@ -136,13 +263,13 @@ When describing height information of buildings etc. and other objects the heigh **The position of each object is described relative to the one absolute point described in the previous section.** The previously described UTM coordinate system is used to describe the axis. **For all road users or misc objects the geometrical center of the object is taken to describe the position.** The z-position is equivalent to the altitude. #### 2.9.2 Heading -**The heading angle is defined as the angle around the z-axis.** This is true for the UTM coordinate system and the vehicle coordinate system. Therefore, the heading angle can be used to transform from one system into the other. 
Heading angles are provided in the mathematically positive direction (counter clockwise). **A heading angle of zero corresponds with the east direction (x-axis in UTM coordinate system).** -In the area of normal driving dynamics (which we expect in the data) it is assumed that heading and yaw are equivalent. Therefore, it is enough to save the heading angle of road users. +**The heading angle is defined as the angle around the z-axis.** This is true for the UTM coordinate system and the vehicle coordinate system. Therefore, the heading angle can be used to transform from one system into the other. Heading angles are provided in the mathematically positive direction (counter clockwise). **A heading angle of zero corresponds with the east direction (x-axis in UTM coordinate system). The heading angle is NOT given in the vehicle coordinate system, but in the UTM coordinate system. The heading angle is between 0 and 360 degrees.** -#### 2.9.3 Pitch -The pitch angle is provided in the vehicles coordinate system. It is the angle in respect to the y-axis in the vehicle coordinate system. It is also provided in mathematically positive direction. An angle of zero corresponds to the x-y-plane. + In the area of normal driving dynamics (which we expect in the data) it is assumed that heading and yaw are equivalent. Therefore, it is enough to save the heading angle of road users. +#### 2.9.3 Pitch +The pitch angle is provided in the vehicle's coordinate system. It is the angle with respect to the y-axis in the vehicle coordinate system. It is also provided in mathematically positive direction. An angle of zero corresponds to the x-y-plane. The pitch angle is defined in the mathematical positive sense, i.e., it is positive when stopping. It is given in degrees and is between 0 and +/-180. #### 2.9.4 Roll The roll angle is provided in the vehicles coordinate system. It is the angle in respect to the x-axis in the vehicle coordinate system. It is also provided in mathematically positive direction. An angle of zero corresponds to the x-y-plane. @@ -172,19 +299,35 @@ The filename needs to be provided in order to be able to couple the data provide The numbers of the individual recordings can be chosen by the partner providing the data. It is important that those numbers are unique and that the ground truth recording and the corresponding sensor under test recording from the same drive get the same number. -### 3.4 mergerVersion +### 3.4 converterVersion +The field converterVersion in the top level refers to the version number of the tool used to merge all data together (if this was the case). + Version number of merger used to merge map data, dynamic information and weather data. Can be provided as string in the format “x.y”. -### 3.5 Natural Behavior +### 3.5 referenceModality + +Please provide the method used to gather the reference / ground truth data. This can e.g. be the use of infrastructure sensors, a measuring vehicle equipped with high-class sensors (with and without human annotations), an RTK GNSS system or drones. If the method that was used for the data is not contained in the list below, please select other and give some information in the customInformation field.
+ +- 1: Measuring vehicle equipped with sensors and additional human labelling +- 2: Measuring vehicle equipped with sensors without any additional human labelling +- 3: Use of drones +- 4: Infrastructure sensors +- 5: RTK GNSS system +- 6: Other + +### 3.6 customInformation +Please provide any additional information that you want the user / the analyst to have that does not fit into any other signal. + +### 3.7 Natural Behavior In order to be able to judge the extracted scenarios, it is important to note down if the data at hand was recorded during a purposely, planned and staged scenario or if the data represents naturalistic driving data. If the data represents non naturalistic driving data please set the flag to false otherwise it is set to true. If the recording is only performed via button push after witnessing an “interesting” situation, this would also be non-naturalistic data. The information is saved as an attribute. The flag is set as an attribute. -### 3.6 Natural Exposure +### 3.8 Natural Exposure In order to calculate true exposure it is important to know if the scenario was purposely staged (see above) or naturalistic. If it was purposely staged, it can not have true exposure in the data, therefore, this flag needs to be set to false. If the recording is only performed via button push after witnessing an “interesting” situation, this would also be non natural exposure (set flag to false). Furthermore, the flag needs to be set to false if the data has been preprocessed to only upload “interesting” scenarios. The flag is set as an attribute. -### 3.7 Reference Point (refPointLat/refPointLong) +### 3.9 Reference Point (refPointLat/refPointLong) The two data fields are used to note down the latitudinal and longitudinal values of the chosen reference point. More information regarding the coordinate systems can be found in chapter 2.7. The information is saved as an attribute. -### 3.8 Daytime +### 3.10 Daytime The exact date and time at the beginning of the recording. The date needs to be saved in the format: yyyymmddhhmmss. It is saved as an attribute. @@ -335,11 +478,11 @@ List: - solid_dashed = 4 (first mentioned type of line is the one on the inside of the lane, i.e. the one valid for the driver) - (vehicle driving on right hand side) - solid_dashed + dashed_solid - dashed_solid = 5 (first mentioned type of line is the one on the inside of the lane, i.e. the one valid for the driver) - (vehicle driving on right hand side) - dashed_solid + solid_dashed - dashed_changeDirectionLane = 6 - Lanes that can be used for both directions (permission displayed on changable signs) are divided by double dashed lines. 
diff --git a/omega_format/cli.py b/omega_format/cli.py index 2b612d8..34cf279 100644 --- a/omega_format/cli.py +++ b/omega_format/cli.py @@ -19,16 +19,17 @@ def get_snippets_for_vis(reference, perception, snip, max_snippets): + validate = True if reference is not None: - rr = ReferenceRecording.from_hdf5(reference) + rr = ReferenceRecording.from_hdf5(reference, validate) if snip and perception is None: snippets = [SnippetContainer(reference=snippet) for snippet in rr.extract_snippets(max_snippets)] elif perception is not None: - snippets = [SnippetContainer(perception=PerceptionRecording.from_hdf5(perception), reference=rr)] + snippets = [SnippetContainer(perception=PerceptionRecording.from_hdf5(perception, validate), reference=rr)] else: snippets = [SnippetContainer(reference=rr)] elif perception is not None: - snippets = [SnippetContainer(perception=PerceptionRecording.from_hdf5(perception))] + snippets = [SnippetContainer(perception=PerceptionRecording.from_hdf5(perception, validate))] else: raise ValueError('Either define a reference filename or a perception filename') return snippets diff --git a/omega_format/dynamics/bounding_box.py b/omega_format/dynamics/bounding_box.py index 12225b9..af8565d 100644 --- a/omega_format/dynamics/bounding_box.py +++ b/omega_format/dynamics/bounding_box.py @@ -9,7 +9,7 @@ class BoundingBox(BaseModel): class Config(PydanticConfig): pass - vec: np.ndarray + vec: np.ndarray = np.array([], dtype=np.float64) confident_length: bool = True confident_width: bool = True diff --git a/omega_format/dynamics/dynamic_object.py b/omega_format/dynamics/dynamic_object.py index 24d6afb..a980fa4 100644 --- a/omega_format/dynamics/dynamic_object.py +++ b/omega_format/dynamics/dynamic_object.py @@ -1,5 +1,6 @@ from dataclasses import fields from pydantic import conint +from pydantic.fields import Field import numpy as np @@ -37,8 +38,8 @@ def timespan_to_cutoff_idxs(obj, birth, death): class DynamicObject(InputClassBase, BBXCornersClass): - bb: BoundingBox - tr: Trajectory + bb: BoundingBox = Field(default_factory=BoundingBox) + tr: Trajectory = Field(default_factory=Trajectory) birth: conint(ge=0) """first timestamp idx""" diff --git a/omega_format/dynamics/misc_object.py b/omega_format/dynamics/misc_object.py index 07e8b44..7de8c14 100644 --- a/omega_format/dynamics/misc_object.py +++ b/omega_format/dynamics/misc_object.py @@ -1,4 +1,5 @@ from h5py import Group +from pydantic.fields import Field from .dynamic_object import DynamicObject from ..enums import ReferenceTypes @@ -8,8 +9,8 @@ class MiscObject(DynamicObject): - type: ReferenceTypes.MiscObjectType - sub_type: ReferenceTypes.MiscObjectSubType + type: ReferenceTypes.MiscObjectType = Field(default_factory=ReferenceTypes.MiscObjectType) + sub_type: ReferenceTypes.MiscObjectSubType = Field(default_factory=ReferenceTypes.MiscObjectSubType) @classmethod def from_hdf5(cls, group: Group, validate=True): diff --git a/omega_format/dynamics/road_user.py b/omega_format/dynamics/road_user.py index d4adf73..6277832 100644 --- a/omega_format/dynamics/road_user.py +++ b/omega_format/dynamics/road_user.py @@ -1,4 +1,4 @@ -from pydantic.fields import Field +from pydantic.fields import Field, Any from .dynamic_object import DynamicObject from .vehicle_lights import VehicleLights @@ -6,14 +6,15 @@ from ..reference_resolving import * from .trajectory import Trajectory from .bounding_box import BoundingBox +from typing import Optional from h5py import Group class RoadUser(DynamicObject): - type: ReferenceTypes.RoadUserType - sub_type: 
ReferenceTypes._RoadUserSubType - connected_to: ReferenceElement = None + type: ReferenceTypes.RoadUserType = ReferenceTypes.RoadUserType.REGULAR + sub_type: Any = None # ReferenceTypes.RoadUserSubType + connected_to: Optional[ReferenceElement] = None is_data_recorder: bool = False vehicle_lights: VehicleLights = Field(default_factory=VehicleLights) id: int = -1 @@ -53,7 +54,8 @@ def to_hdf5(self, group: Group): group.attrs.create('subtype', data=self.sub_type) if self.connected_to is not None: group.attrs.create('connectedTo', data=self.connected_to.reference) - + else: + group.attrs.create('connectedTo', data=-1) self.bb.to_hdf5(group.create_group('boundBox')) self.tr.to_hdf5(group.create_group('trajectory')) self.vehicle_lights.to_hdf5(group.create_group('vehicleLights')) diff --git a/omega_format/dynamics/trajectory.py b/omega_format/dynamics/trajectory.py index d33e94e..7ee8b6f 100644 --- a/omega_format/dynamics/trajectory.py +++ b/omega_format/dynamics/trajectory.py @@ -6,17 +6,17 @@ from ..reference_resolving import raise_not_resolved from ..pydantic_utils.pydantic_config import PydanticConfig - +from warnings import warn class Trajectory(BaseModel): class Config(PydanticConfig): pass - pos_x: np.ndarray - pos_y: np.ndarray - pos_z: np.ndarray - roll: np.ndarray - pitch: np.ndarray - heading: np.ndarray + pos_x: np.ndarray = np.array([], dtype=np.float64) + pos_y: np.ndarray = np.array([], dtype=np.float64) + pos_z: np.ndarray = np.array([], dtype=np.float64) + roll: np.ndarray = np.array([], dtype=np.float64) + pitch: np.ndarray = np.array([], dtype=np.float64) + heading: np.ndarray = np.array([], dtype=np.float64) vel_longitudinal: Optional[np.ndarray] vel_lateral: Optional[np.ndarray] @@ -30,17 +30,13 @@ class Config(PydanticConfig): heading_der: Optional[np.ndarray] @validator('*') # the '*' means that this validator is applied to each member of Trajectory - def check_array_length(cls, v, values): - - if not len(v) > 0: - raise ValueError('received trajectory with empty array') - + def check_array_length(cls, v, values, **kwargs): if len(values) > 0: # first array would be validated if len(values)=0 -> no length to compare against # use the length of pos_x to check equality with other array length length = len(values.get('pos_x')) - if len(v) != length: - raise ValueError(f'length of all trajectory arrays must match, expected len {len(v)}, actual len {length}') + if len(v) != length and len(v) > 0: + raise ValueError(f'length of all trajectory arrays must match, expected len {length}, actual len {len(v)}') return v @validator('vel_longitudinal', 'vel_lateral') @@ -104,18 +100,26 @@ def to_hdf5(self, group: Group): @cached_property def vel(self): - return np.sqrt(np.power(self.vel_lateral, 2) + np.power(self.vel_longitudinal, 2) + np.power(self.vel_z, 2)) + if self.vel_lateral is None or self.vel_longitudinal is None: + return None + return np.sqrt(np.power(self.vel_lateral, 2) + np.power(self.vel_longitudinal, 2))# + np.power(self.vel_z, 2)) @cached_property def acc(self): - return np.sqrt(np.power(self.acc_lateral, 2) + np.power(self.acc_longitudinal, 2) + np.power(self.acc_z, 2)) + if self.acc_lateral is None or self.acc_longitudinal is None: + return None + return np.sqrt(np.power(self.acc_lateral, 2) + np.power(self.acc_longitudinal, 2))# + np.power(self.acc_z, 2)) @cached_property def is_still(self, vel_thresh=0.1, acc_thresh=0.1): + if self.vel is None or self.acc is None: + return None return np.logical_and(self.vel <= vel_thresh, self.acc <= acc_thresh) @cached_property 
def is_static(self): + if self.is_still is None: + return None return np.all(self.is_still) @cached_property diff --git a/omega_format/enums/perception_types.py b/omega_format/enums/perception_types.py index d9bef61..59c601b 100644 --- a/omega_format/enums/perception_types.py +++ b/omega_format/enums/perception_types.py @@ -2,7 +2,7 @@ class PerceptionTypeSpecification(Enum): - FORMAT_VERSION = 'v1.2' + FORMAT_VERSION = 'v1.3' class PerceptionType(IntEnum): NOT_PROVIDED = 0 diff --git a/omega_format/meta_data.py b/omega_format/meta_data.py index 33c5002..6f76d24 100644 --- a/omega_format/meta_data.py +++ b/omega_format/meta_data.py @@ -34,6 +34,9 @@ class MetaData(InputClassBase): state_converter_version: str = None misc_object_converter_version: str = None + custom_information: str = "" + reference_modality: int = None + @property def version_identifier(self): none2string = lambda x: "0.0" if x is None else x @@ -89,6 +92,8 @@ def from_hdf5(cls, file: File, validate: bool = True): weather_converter_version=get_converter_version(file, "weather"), state_converter_version=get_converter_version(file, "state"), misc_object_converter_version=get_converter_version(file, "miscObject"), + custom_information=cls.get_attribute(file, 'customInformation'), + reference_modality=cls.get_attribute(file, 'referenceModality'), ) return self @@ -120,6 +125,9 @@ def to_hdf5(self, group: Group): self.write_converter_version(group, "state", self.state_converter_version) self.write_converter_version(group, "miscObject", self.misc_object_converter_version) + group.attrs.create("customInformation", data=self.custom_information) + group.attrs.create("referenceModality", data=self.reference_modality) + @classmethod def assure_string(cls, byte_array): if type(byte_array) == bytes: diff --git a/omega_format/perception/__init__.py b/omega_format/perception/__init__.py index 7b8f94e..67737bd 100644 --- a/omega_format/perception/__init__.py +++ b/omega_format/perception/__init__.py @@ -1,6 +1,6 @@ from .ego_position import EgoPosition from .meta_object import MetaObject from .misc_info import MiscInfo -from .object import Object, ObjectClassification, TrackingPoint +from .object import Object, ObjectClassification from .sensor import Sensor from .valvar import ValVar diff --git a/omega_format/perception/config.json b/omega_format/perception/config.json new file mode 100644 index 0000000..bee1ac6 --- /dev/null +++ b/omega_format/perception/config.json @@ -0,0 +1,5 @@ +{ + "converterVersion": "2.0", + "egoOffset": -1.2 + } + \ No newline at end of file diff --git a/omega_format/perception/converter.py b/omega_format/perception/converter.py new file mode 100644 index 0000000..ff78f21 --- /dev/null +++ b/omega_format/perception/converter.py @@ -0,0 +1,443 @@ +import os +import random +import tempfile +from collections import UserDict +from copy import deepcopy +from json import load +from pathlib import Path +from typing import Union, Optional + +import h5py +import numpy as np +import omega_format +import pyproj +import typer +from PyQt5 import QtWidgets +from PyQt5.QtGui import QTransform +from PyQt5.QtCore import QPointF +from ..dynamics import RoadUser +from ..reference_recording import ReferenceRecording +from ..perception_recording import PerceptionRecording +from ..enums import PerceptionTypes, ReferenceTypes +from omega_format.perception import Object, Sensor +from tqdm import tqdm + +perc_format_ver = "1.3" +format_version = omega_format.__clean_version__ + + +class Converter: + perception_recording: 
PerceptionRecording + reference_recording: ReferenceRecording + ego_id: int + ego_offset: float + ego_obj: RoadUser + converter_version: str + config_dict: dict + object_map: dict + transformer: pyproj.Transformer + sensor_config_dict: dict + sensors: dict + original_obj_id = dict + sensor_list: list + + def __init__(self, reference_recording: ReferenceRecording, ego_id: int, ego_offset: float = None, + sensor_config_dict: dict = None, sensors: Union[dict, UserDict] = None, converter_version: str = None): + self.reference_recording = reference_recording + self.perception_recording = PerceptionRecording() + self.ego_id = ego_id + self.ego_offset = ego_offset + self.ego_obj = self.reference_recording.ego_vehicle + self.converter_version = converter_version + self.original_obj_id = dict() + + self.transformer = pyproj.Transformer.from_crs(pyproj.CRS('EPSG:25832').geodetic_crs, pyproj.CRS('EPSG:25832')) + + with open(os.path.abspath(os.path.dirname(__file__)) + '/config.json', "r") as file: + self.config_dict = load(file) + # sensor information can be either provided from config or from parsed perception/sensor dictionary + self.sensor_config_dict = sensor_config_dict + self.sensors = sensors + self.sensor_list = [] + + reference_type = ReferenceTypes.RoadUserType + perception_type = PerceptionTypes.ObjectClassification + self.object_map = { + reference_type.REGULAR: perception_type.CAR, + reference_type.CAR: perception_type.CAR, + reference_type.TRUCK: perception_type.TRUCK, + reference_type.BUS: perception_type.TRUCK, + reference_type.MOTORCYCLE: perception_type.MOTORCYCLE, + reference_type.BICYCLE: perception_type.BICYCLE, + reference_type.PEDESTRIAN: perception_type.PEDESTRIAN, + reference_type.PUSHABLE_PULLABLE: perception_type.NO_INFO, + reference_type.WHEELCHAIR: perception_type.SMALLER_THAN_CAR, + reference_type.PERSONAL_MOBILITY_DEVICE: perception_type.SMALLER_THAN_CAR, + reference_type.TRAILER: perception_type.BIGGER_THAN_CAR, + reference_type.FARMING: perception_type.BIGGER_THAN_CAR, + reference_type.RAIL: perception_type.BIGGER_THAN_CAR, + reference_type.CARRIAGE: perception_type.BIGGER_THAN_CAR, + } + + def convert_to_perception_format(self): + self.add_meta_data() + self.add_ego_position() + self.add_meta_object() + self.add_objects() + self.add_sensors() + self.filter_objects_outside_sensor_fov() + return self.perception_recording, self.original_obj_id + + def add_meta_data(self): + self.perception_recording.converter_version = self.converter_version if self.converter_version else self.config_dict['converterVersion'] + self.perception_recording.ego_offset = self.ego_offset if self.ego_offset else float(self.config_dict['egoOffset']) + + self.perception_recording.recorder_number = self.reference_recording.meta_data.recorder_number + self.perception_recording.recording_number = self.reference_recording.meta_data.recording_number + self.perception_recording.ego_id = self.ego_id + self.perception_recording.timestamps.val = self.reference_recording.timestamps.val + self.perception_recording.custom_information = 'artificially created sensor data' + + def add_ego_position(self): + ego_length = self.ego_obj.end - self.ego_obj.birth + 1 + self.perception_recording.ego_position.heading.val = self.ego_obj.tr.heading + self.perception_recording.ego_position.pos_latitude.val = np.empty(ego_length) + self.perception_recording.ego_position.pos_longitude.val = np.empty(ego_length) + + for index in range(ego_length): + ref_x, ref_y = 
self.transformer.transform(self.reference_recording.meta_data.reference_point_lat, + self.reference_recording.meta_data.reference_point_lon) + + ego_lat, ego_lon = self.transformer.transform(ref_x + self.ego_obj.tr.pos_x[index], + ref_y + self.ego_obj.tr.pos_y[index], + direction='INVERSE') + + self.perception_recording.ego_position.pos_latitude.val[index] = ego_lat + self.perception_recording.ego_position.pos_longitude.val[index] = ego_lon + + # z information left empty + + # yaw rate and pitch left empty + + # dont set to zero --> leave empty + ''' + variance_vec = np.full(ego_length, 0.0) + self.perception_recording.ego_position.heading.var = variance_vec + self.perception_recording.ego_position.pos_latitude.var = variance_vec + self.perception_recording.ego_position.pos_longitude.var = variance_vec + ''' + + def add_meta_object(self): + measured = PerceptionTypes.PerceptionType.MEASURED + not_provided = PerceptionTypes.PerceptionType.NOT_PROVIDED + determined = PerceptionTypes.PerceptionType.DETERMINED + + # first: set all attributes to measured + for k, v in vars(self.perception_recording.meta_object).items(): + setattr(self.perception_recording.meta_object, k, measured) + + # second: set attributes that are not provided + self.perception_recording.meta_object.dist_z_val_type = not_provided + self.perception_recording.meta_object.dist_z_var_type = not_provided + self.perception_recording.meta_object.rcs_val_type = not_provided + self.perception_recording.meta_object.dist_longitudinal_var_type = not_provided + self.perception_recording.meta_object.dist_lateral_var_type = not_provided + self.perception_recording.meta_object.rel_vel_longitudinal_var_type = not_provided + self.perception_recording.meta_object.rel_vel_lateral_var_type = not_provided + self.perception_recording.meta_object.abs_vel_longitudinal_var_type = not_provided + self.perception_recording.meta_object.abs_vel_lateral_var_type = not_provided + self.perception_recording.meta_object.rel_acc_longitudinal_var_type = not_provided + self.perception_recording.meta_object.rel_acc_lateral_var_type = not_provided + self.perception_recording.meta_object.abs_acc_longitudinal_var_type = not_provided + self.perception_recording.meta_object.abs_acc_lateral_var_type = not_provided + self.perception_recording.meta_object.heading_var_type = not_provided + self.perception_recording.meta_object.width_var_type = not_provided + self.perception_recording.meta_object.height_var_type = not_provided + self.perception_recording.meta_object.length_var_type = not_provided + self.perception_recording.meta_object.height_val_type = not_provided + self.perception_recording.meta_object.object_classification_confidence_val_type = not_provided + self.perception_recording.meta_object.meas_state_val_type = not_provided + + # set attributes that are calculated from others + self.perception_recording.meta_object.movement_classification_val_type = determined + self.perception_recording.meta_object.rel_vel_longitudinal_val_type = determined + self.perception_recording.meta_object.rel_vel_lateral_val_type = determined + self.perception_recording.meta_object.rel_acc_longitudinal_val_type = determined + self.perception_recording.meta_object.rel_acc_lateral_val_type = determined + self.perception_recording.meta_object.age_val_type = determined + + + def add_objects(self): + for rid, road_user in self.reference_recording.road_users.items(): # type: int, RoadUser + if rid == self.ego_id: + continue + + obj_birth_index = max(self.ego_obj.birth, road_user.birth) + 
obj_death_index = min(self.ego_obj.end, road_user.end) + if not road_user.in_timespan(obj_birth_index, obj_death_index): + continue + + obj = self.create_object(road_user, obj_birth_index, obj_death_index) + self.convert_absolute_to_relative_object_values(obj, obj_birth_index, obj_death_index) + # self.fill_object_variances(obj) + self.perception_recording.objects[rid] = obj + + def create_object(self, road_user: RoadUser, obj_birth_index: int, obj_death_index: int): + obj = Object() + obj_length = obj_death_index - obj_birth_index + 1 + + self.fill_general_object_attributes(obj, obj_birth_index, obj_length, road_user) + self.calculate_movement_classification(obj, obj_length, road_user) + # self.calculate_object_azimuth(road_user, obj, obj_birth_index, obj_length) + + return obj + + def fill_general_object_attributes(self, obj, obj_birth_index, obj_length, road_user): + obj.id = road_user.id + obj.birth_stamp = obj_birth_index + obj.object_classification.val = [self.object_map[ReferenceTypes.RoadUserType(road_user.type)]] * obj_length + obj.object_classification.confidence = np.full(obj_length, 1.0) + + obj.heading.val = road_user.tr.heading + obj.dist_lateral.val = road_user.tr.pos_x + obj.dist_longitudinal.val = road_user.tr.pos_y + obj.dist_z.val = road_user.tr.pos_z + + obj.abs_vel_longitudinal.val = road_user.tr.vel_longitudinal + obj.abs_vel_lateral.val = road_user.tr.vel_lateral + obj.abs_acc_longitudinal.val = road_user.tr.acc_longitudinal + obj.abs_acc_lateral.val = road_user.tr.acc_lateral + + obj.width.val = np.full(obj_length, road_user.bb.width) + obj.length.val = np.full(obj_length, road_user.bb.length) + obj.height.val = np.full(obj_length, road_user.bb.height) + + obj.age = np.arange(0, obj_length, dtype=float) + obj.confidence_of_existence = np.full(obj_length, 1.0) + + obj.tracking_point = [PerceptionTypes.TrackingPoint.CENTER_OF_VEHICLE] * obj_length + obj.meas_state = [PerceptionTypes.MeasState.MEASURED] * obj_length + + def calculate_movement_classification(self, obj: Object, obj_length: int, road_user: RoadUser): + if road_user.tr.is_static: + obj.movement_classification = [PerceptionTypes.MovementClassification.STATIONARY] * obj_length + else: + obj.movement_classification = list(map(self.movement_mapper, list(road_user.tr.is_still))) + + @staticmethod + def movement_mapper(is_still: bool): + if is_still: + return PerceptionTypes.MovementClassification.STOPPED + else: + return PerceptionTypes.MovementClassification.MOVING + + # not needed anymore + ''' + def calculate_object_azimuth(self, road_user: RoadUser, obj: Object, obj_birth_index: int, obj_length: int): + geod = pyproj.CRS.from_epsg(25832).get_geod() + for index in range(obj_length): + ego_lat = self.perception_recording.ego_position.pos_latitude.val[index + obj_birth_index - 1] + ego_lon = self.perception_recording.ego_position.pos_longitude.val[index + obj_birth_index - 1] + + ego_x, ego_y = self.transformer.transform(ego_lat, ego_lon) + obj_lat, obj_lon = self.transformer.transform(ego_x + road_user.tr.pos_x[index], + ego_y + road_user.tr.pos_y[index], + direction='INVERSE') + + azimuth1, azimuth2, distance = geod.inv(ego_lon, ego_lat, obj_lon, obj_lat) + obj.azimuth.val = np.append(obj.azimuth.val, np.deg2rad(azimuth1)) + ''' + + def convert_absolute_to_relative_object_values(self, obj: Object, obj_birth_index: int, obj_death_index: int): + self.calculate_relative_distance(obj, obj_birth_index, obj_death_index) + self.calculate_relative_velocity(obj, obj_birth_index, obj_death_index) + 
self.calculate_relative_acceleration(obj, obj_birth_index, obj_death_index) + + def calculate_relative_distance(self, obj: Object, obj_birth_index: int, obj_death_index: int): + # abs reference coordinate system -> relative perception coordinate system + heading, dist_x, dist_y = self.transform_absolute_to_relative( + ego_h=self.ego_obj.tr.heading[obj_birth_index:obj_death_index + 1], + ego_x=self.ego_obj.tr.pos_x[obj_birth_index:obj_death_index + 1], + ego_y=self.ego_obj.tr.pos_y[obj_birth_index:obj_death_index + 1], + obj_h=obj.heading.val, + obj_x=obj.dist_lateral.val, + obj_y=obj.dist_longitudinal.val) + obj.heading.val = heading + obj.dist_lateral.val = dist_x + self.perception_recording.ego_offset + obj.dist_longitudinal.val = dist_y + + def calculate_relative_velocity(self, obj: Object, obj_birth_index: int, obj_death_index: int): + _, vel_x, vel_y = self.transform_absolute_to_relative( + ego_h=self.ego_obj.tr.heading[obj_birth_index:obj_death_index + 1], + ego_x=self.ego_obj.tr.vel_lateral[obj_birth_index:obj_death_index + 1], + ego_y=self.ego_obj.tr.vel_longitudinal[obj_birth_index:obj_death_index + 1], + obj_h=obj.heading.val, + obj_x=obj.abs_vel_lateral.val, + obj_y=obj.abs_vel_longitudinal.val) + obj.rel_vel_lateral.val = vel_x + obj.rel_vel_longitudinal.val = vel_y + + def calculate_relative_acceleration(self, obj: Object, obj_birth_index: int, obj_death_index: int): + _, acc_x, acc_y = self.transform_absolute_to_relative( + ego_h=self.ego_obj.tr.heading[obj_birth_index:obj_death_index + 1], + ego_x=self.ego_obj.tr.acc_lateral[obj_birth_index:obj_death_index + 1], + ego_y=self.ego_obj.tr.acc_longitudinal[obj_birth_index:obj_death_index + 1], + obj_h=obj.heading.val, + obj_x=obj.abs_acc_lateral.val, + obj_y=obj.abs_acc_longitudinal.val) + obj.rel_acc_lateral.val = acc_x + obj.rel_acc_longitudinal.val = acc_y + + # not needed. The variances var type is set to not_provided + ''' + def fill_object_variances(self, obj: Object): + variance_vec = np.full(obj.len, 0.0) + + obj.dist_lateral.var = variance_vec + obj.dist_longitudinal.var = variance_vec + obj.dist_z.var = variance_vec + obj.heading.var = variance_vec + obj.azimuth.var = variance_vec + + obj.abs_vel_longitudinal.var = variance_vec + obj.abs_vel_lateral.var = variance_vec + obj.abs_acc_longitudinal.var = variance_vec + obj.abs_acc_lateral.var = variance_vec + + obj.rel_vel_longitudinal.var = variance_vec + obj.rel_vel_lateral.var = variance_vec + obj.rel_acc_longitudinal.var = variance_vec + obj.rel_acc_lateral.var = variance_vec + + obj.width.var = variance_vec + obj.length.var = variance_vec + obj.height.var = variance_vec + + obj.size2d.var = variance_vec + obj.size3d.var = variance_vec + + obj.age.var = variance_vec + obj.tracking_point.var = variance_vec + ''' + + def transform_absolute_to_relative(self, ego_h, ego_x, ego_y, obj_h, obj_x, obj_y): + out_h = obj_h - ego_h + for i in range(out_h.size): + if out_h[i] < 0: + out_h[i] += 360. + + x = obj_x - ego_x + y = obj_y - ego_y + heading = ego_h - 90. 
+ out_x = -np.multiply(x, np.cos(np.deg2rad(heading))) - np.multiply(y, np.sin(np.deg2rad(heading))) + out_y = -np.multiply(x, np.sin(np.deg2rad(heading))) + np.multiply(y, np.cos(np.deg2rad(heading))) + return out_h, out_x, out_y + + def add_sensors(self): + if self.sensors is not None: + for sensor_id, sensor in self.sensors.items(): + self.create_sensor_fov(sensor) + self.perception_recording.sensors[sensor_id] = sensor + elif self.sensor_config_dict is not None: + for sensor_id, sensor_dict in enumerate(self.sensor_config_dict['sensors']): + sensor = self.create_sensor_from_config(sensor_id, sensor_dict) + self.create_sensor_fov(sensor) + self.perception_recording.sensors[sensor_id] = sensor + else: + print("[ERROR] No sensor found!") + exit(1) + + def create_sensor_from_config(self, sensor_id: int, sensor_dict: dict): + temp = tempfile.TemporaryFile() + with h5py.File(temp, 'w') as f: + sensor_group = f.create_group(str(sensor_id)) + + for key, value in sensor_dict.items(): + sensor_group.attrs.create(key, data=value) + + sensor = Sensor.from_hdf5(sensor_group) + return sensor + + def create_sensor_fov(self, sensor: Sensor): + ego_offset = self.perception_recording.ego_offset + offset_x = sensor.sensor_pos_lateral + offset_y = sensor.sensor_pos_longitudinal + ego_offset + + heading = sensor.sensor_heading + dist_min = sensor.min_range + dist_max = sensor.max_range + diameter = dist_max * 2 + fov_horizontal = sensor.fov_horizontal + fov_vertical = sensor.fov_vertical + + start_angle = int(-fov_horizontal / 2) + span_angle = int(fov_horizontal) + + artist = QtWidgets.QGraphicsEllipseItem(-diameter / 2, -diameter / 2, diameter, diameter) + artist.setStartAngle(start_angle * 16) + artist.setSpanAngle(span_angle * 16) + + center_point = artist.boundingRect().center() + + # artist.translate(-center_point.x(), -center_point.y()) + q_transform = QTransform() + artist.setTransform(q_transform.fromTranslate(-center_point.x(), -center_point.y()), True) + artist.setRotation(heading + 90) + # artist.translate(center_point.x(), center_point.y()) + artist.setTransform(q_transform.fromTranslate(center_point.x(), center_point.y()), True) + + # artist.translate(offset_x, offset_y) + artist.setTransform(q_transform.fromTranslate(offset_x, offset_y), True) + + self.sensor_list.append(artist) + + def filter_objects_outside_sensor_fov(self): + for obj_index in list(self.perception_recording.objects.keys()): + obj = self.perception_recording.objects[obj_index] + obj_timespan_list = self.generate_object_in_ego_view_timespan_list(obj) + self.cut_object_to_ego_fov(obj, obj_timespan_list) + + def generate_object_in_ego_view_timespan_list(self, obj: Object): + obj_timespan_list = [] + start = -1 + + for i in range(obj.len): + x = obj.dist_lateral.val[i] + y = obj.dist_longitudinal.val[i] + length = obj.length.val[i] + width = obj.width.val[i] + + points = [ + QPointF(y, x), + QPointF(y, x + length), + QPointF(y + width, x), + QPointF(y + width, x + length), + ] + + if any([area.contains(point) for point in points for area in self.sensor_list]): + if start == -1: + start = i + elif start != -1: + end = i - 1 + obj_timespan_list.append((start, end)) + start = -1 + if start != -1: + end = obj.len - 1 + obj_timespan_list.append((start, end)) + return obj_timespan_list + + def cut_object_to_ego_fov(self, obj: Object, obj_timespan_list: list): + if len(obj_timespan_list) == 0: + del self.perception_recording.objects[obj.id] + else: + self.original_obj_id[obj.id] = obj.id + copy_obj = deepcopy(obj) + # first object 
appearance is set directly on the original object + obj.cut_to_timespan(*obj_timespan_list[0]) + # further object appearance are set on a copy + for start, end in obj_timespan_list[1:]: + adj_obj = deepcopy(copy_obj) + adj_obj.cut_to_timespan(start, end) + adj_obj.id = max(list(self.perception_recording.objects.keys())) + 1 + self.original_obj_id[adj_obj.id] = obj.id + self.perception_recording.objects[adj_obj.id] = adj_obj \ No newline at end of file diff --git a/omega_format/perception/ego_position.py b/omega_format/perception/ego_position.py index 26c4d13..60723d2 100644 --- a/omega_format/perception/ego_position.py +++ b/omega_format/perception/ego_position.py @@ -1,6 +1,6 @@ from pydantic.fields import Field from warnings import warn - +import numpy as np from h5py import Group from pydantic import validator from pydantic import BaseModel @@ -15,24 +15,27 @@ class Config(PydanticConfig): heading: ValVar = Field(default_factory=ValVar) pos_longitude: ValVar = Field(default_factory=ValVar) pos_latitude: ValVar = Field(default_factory=ValVar) - pos_z: float = 0.0 + pos_z: ValVar = Field(default_factory=ValVar) + yaw_rate: np.ndarray = np.array([], dtype=np.float64) + pitch: np.ndarray = np.array([], dtype=np.float64) - @validator('heading', 'pos_longitude', 'pos_latitude') - def check_array_length(cls, v, values): - if isinstance(v, ValVar): - assert len(v.val) == len(v.var), f'length of val {len(v.val)} and length of var {len(v.var)} are not the same' - return v - else: - if not len(v) > 0: - warn('received trajectory with empty array') + if False: + @validator('heading', 'pos_longitude', 'pos_latitude') + def check_array_length(cls, v, values): + if isinstance(v, ValVar): + assert len(v.val) == len(v.var), f'length of val {len(v.val)} and length of var {len(v.var)} are not the same' + return v + else: + if not len(v) > 0: + warn('received trajectory with empty array') - if len(values) > 0: - # first array would be validated if len(values)=0 -> no length to compare against - # use the length of pos_x to check equality with other array length - length = len(values.get('heading')) - if len(v) != length: - raise ValueError(f'length of all EgoPosition arrays must match, expected len {len(v)}, actual len {length}') - return v + if len(values) > 0: + # first array would be validated if len(values)=0 -> no length to compare against + # use the length of pos_x to check equality with other array length + length = len(values.get('heading')) + if len(v) != length: + raise ValueError(f'length of all EgoPosition arrays must match, expected len {len(v)}, actual len {length}') + return v @validator('heading') def check_angle(cls, v): @@ -59,7 +62,9 @@ def from_hdf5(cls, group: Group, validate: bool = True): heading=ValVar.from_hdf5(group['heading'], validate=validate), pos_longitude=ValVar.from_hdf5(group['posLongitude'], validate=validate), pos_latitude=ValVar.from_hdf5(group['posLatitude'], validate=validate), - pos_z=group['posZ'][()].astype(float), + pos_z=ValVar.from_hdf5(group['posZ'], validate=validate), + yaw_rate=group['yawRate'][()].astype(float), + pitch=group['pitch'][()].astype(float), ) return self @@ -67,4 +72,6 @@ def to_hdf5(self, group: Group): self.heading.to_hdf5(group.create_group('heading')) self.pos_longitude.to_hdf5(group.create_group('posLongitude')) self.pos_latitude.to_hdf5(group.create_group('posLatitude')) - group.create_dataset('posZ', data=self.pos_z) + self.pos_z.to_hdf5(group.create_group('posZ')) + group.create_dataset('yawRate', data=self.yaw_rate) + 
group.create_dataset('pitch', data=self.pitch) diff --git a/omega_format/perception/meta_object.py b/omega_format/perception/meta_object.py index d7ee972..7518ecc 100644 --- a/omega_format/perception/meta_object.py +++ b/omega_format/perception/meta_object.py @@ -30,10 +30,8 @@ class Config(PydanticConfig): abs_acc_longitudinal_var_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED abs_acc_lateral_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED abs_acc_lateral_var_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED - azimuth_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED - azimuth_var_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED object_classification_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED - object_classification_confidence_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED + object_classification_confidence_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED heading_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED heading_var_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED @@ -43,13 +41,12 @@ class Config(PydanticConfig): height_var_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED length_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED length_var_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED - size2d_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED - size2d_var_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED - size3d_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED - size3d_var_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED rcs_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED age_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED tracking_point_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED + confidence_of_existence_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED + movement_classification_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED + meas_state_val_type: PerceptionTypes.PerceptionType = PerceptionTypes.PerceptionType.NOT_PROVIDED @classmethod def from_hdf5(cls, group: Group, validate: bool = True): @@ -77,10 +74,8 @@ def from_hdf5(cls, group: Group, validate: bool = True): abs_acc_longitudinal_var_type=PerceptionTypes.PerceptionType(group.attrs['absAccLongitudinalVarType']), abs_acc_lateral_val_type=PerceptionTypes.PerceptionType(group.attrs['absAccLateralValType']), abs_acc_lateral_var_type=PerceptionTypes.PerceptionType(group.attrs['absAccLateralVarType']), - azimuth_val_type=PerceptionTypes.PerceptionType(group.attrs['azimuthValType']), - azimuth_var_type=PerceptionTypes.PerceptionType(group.attrs['azimuthVarType']), object_classification_val_type=PerceptionTypes.PerceptionType(group.attrs['objectClassificationValType']), - object_classification_confidence_type=PerceptionTypes.PerceptionType(group.attrs['objectClassificationConfidenceType']), + 
object_classification_confidence_val_type=PerceptionTypes.PerceptionType(group.attrs['objectClassificationConfidenceValType']), heading_val_type=PerceptionTypes.PerceptionType(group.attrs['headingValType']), heading_var_type=PerceptionTypes.PerceptionType(group.attrs['headingVarType']), @@ -90,13 +85,12 @@ def from_hdf5(cls, group: Group, validate: bool = True): height_var_type=PerceptionTypes.PerceptionType(group.attrs['heightVarType']), length_val_type=PerceptionTypes.PerceptionType(group.attrs['lengthValType']), length_var_type=PerceptionTypes.PerceptionType(group.attrs['lengthVarType']), - size2d_val_type=PerceptionTypes.PerceptionType(group.attrs['size2dValType']), - size2d_var_type=PerceptionTypes.PerceptionType(group.attrs['size2dVarType']), - size3d_val_type=PerceptionTypes.PerceptionType(group.attrs['size3dValType']), - size3d_var_type=PerceptionTypes.PerceptionType(group.attrs['size3dVarType']), rcs_val_type=PerceptionTypes.PerceptionType(group.attrs['rcsValType']), age_val_type=PerceptionTypes.PerceptionType(group.attrs['ageValType']), tracking_point_val_type=PerceptionTypes.PerceptionType(group.attrs['trackingPointValType']), + confidence_of_existence_val_type=PerceptionTypes.PerceptionType(group.attrs['confidenceOfExistenceValType']), + movement_classification_val_type=PerceptionTypes.PerceptionType(group.attrs['movementClassificationValType']), + meas_state_val_type=PerceptionTypes.PerceptionType(group.attrs['measStateValType']), ) return self @@ -123,10 +117,8 @@ def to_hdf5(self, group: Group): group.attrs.create('absAccLongitudinalVarType', data=self.abs_acc_longitudinal_var_type) group.attrs.create('absAccLateralValType', data=self.abs_acc_lateral_val_type) group.attrs.create('absAccLateralVarType', data=self.abs_acc_lateral_var_type) - group.attrs.create('azimuthValType', data=self.azimuth_val_type) - group.attrs.create('azimuthVarType', data=self.azimuth_var_type) group.attrs.create('objectClassificationValType', data=self.object_classification_val_type) - group.attrs.create('objectClassificationConfidenceType', data=self.object_classification_confidence_type) + group.attrs.create('objectClassificationConfidenceValType', data=self.object_classification_confidence_val_type) group.attrs.create('headingValType', data=self.heading_val_type) group.attrs.create('headingVarType', data=self.heading_var_type) @@ -136,10 +128,9 @@ def to_hdf5(self, group: Group): group.attrs.create('heightVarType', data=self.height_var_type) group.attrs.create('lengthValType', data=self.length_val_type) group.attrs.create('lengthVarType', data=self.length_var_type) - group.attrs.create('size2dValType', data=self.size2d_val_type) - group.attrs.create('size2dVarType', data=self.size2d_var_type) - group.attrs.create('size3dValType', data=self.size3d_val_type) - group.attrs.create('size3dVarType', data=self.size3d_var_type) group.attrs.create('rcsValType', data=self.rcs_val_type) group.attrs.create('ageValType', data=self.age_val_type) group.attrs.create('trackingPointValType', data=self.tracking_point_val_type) + group.attrs.create('confidenceOfExistenceValType', data=self.confidence_of_existence_val_type) + group.attrs.create('movementClassificationValType', data=self.movement_classification_val_type) + group.attrs.create('measStateValType', data=self.meas_state_val_type) diff --git a/omega_format/perception/object.py b/omega_format/perception/object.py index 7d061cf..4a80686 100644 --- a/omega_format/perception/object.py +++ b/omega_format/perception/object.py @@ -25,7 +25,9 @@ def
check_confidence_values(cls, v): @validator('confidence') def check_array_length(cls, v, values): - assert len(v) == len(values.get('val')), f"length of confidence array does not match classifications array" + if len(v) != len(values.get('val')): + warn('length of confidence array does not match array length for classification. This is only possible if confidence is of type not_provided') + # assert len(v) == len(values.get('val')), f"length of confidence array does not match classifications array" return v @classmethod @@ -55,7 +57,7 @@ def cut_to_timespan(self, birth, death): assert len(self.confidence) > death self.confidence = self.confidence[birth:death + 1] - +''' class TrackingPoint(BaseModel): class Config(PydanticConfig): pass @@ -104,7 +106,7 @@ def cut_to_timespan(self, birth, death): assert len(self.var) > birth assert len(self.var) > death self.var = self.var[birth:death + 1] - +''' class Object(BaseModel): class Config(PydanticConfig): @@ -116,11 +118,10 @@ class Config(PydanticConfig): width: ValVar = Field(default_factory=ValVar) height: ValVar = Field(default_factory=ValVar) length: ValVar = Field(default_factory=ValVar) - size2d: ValVar = Field(default_factory=ValVar) - size3d: ValVar = Field(default_factory=ValVar) - rcs: ValVar = Field(default_factory=ValVar) - age: ValVar = Field(default_factory=ValVar) - tracking_point: TrackingPoint = Field(default_factory=TrackingPoint) + + rcs: np.ndarray = np.array([]) + age: np.ndarray = np.array([]) + tracking_point: List[PerceptionTypes.TrackingPoint] = Field(default_factory=list) confidence_of_existence: np.ndarray = np.array([]) movement_classification: List[PerceptionTypes.MovementClassification] = Field(default_factory=list) @@ -137,7 +138,6 @@ class Config(PydanticConfig): rel_acc_lateral: ValVar = Field(default_factory=ValVar) abs_acc_longitudinal: ValVar = Field(default_factory=ValVar) abs_acc_lateral: ValVar = Field(default_factory=ValVar) - azimuth: ValVar = Field(default_factory=ValVar) object_classification: ObjectClassification = Field(default_factory=ObjectClassification) @property @@ -160,7 +160,7 @@ def cut_to_timespan(self, birth, death): self.birth_stamp += birth for k, v in vars(self).items(): - if isinstance(v, ValVar) or isinstance(v, ObjectClassification) or isinstance(v, TrackingPoint): + if isinstance(v, ValVar) or isinstance(v, ObjectClassification): v.cut_to_timespan(birth, death) elif isinstance(v, np.ndarray) or isinstance(v, list): setattr(self, k, v[birth:death + 1]) @@ -177,11 +177,9 @@ def from_hdf5(cls, group: Group, validate: bool = True): width=ValVar.from_hdf5(group['width'], validate=validate), height=ValVar.from_hdf5(group['height'], validate=validate), length=ValVar.from_hdf5(group['length'], validate=validate), - size2d=ValVar.from_hdf5(group['size2d'], validate=validate), - size3d=ValVar.from_hdf5(group['size3d'], validate=validate), - rcs=ValVar.from_hdf5(group['rcs'], validate=validate), - age=ValVar.from_hdf5(group['age'], validate=validate), - tracking_point=TrackingPoint.from_hdf5(group['trackingPoint'], validate=validate), + rcs=group['rcs'][()], + age=group['age'][()], + tracking_point=group['trackingPoint'][()].tolist(), confidence_of_existence=group['confidenceOfExistence'][()], movement_classification=list(map(PerceptionTypes.MovementClassification, @@ -199,7 +197,6 @@ def from_hdf5(cls, group: Group, validate: bool = True): rel_acc_lateral=ValVar.from_hdf5(group['relAccLateral'], validate=validate), abs_acc_longitudinal=ValVar.from_hdf5(group['absAccLongitudinal'], 
validate=validate), abs_acc_lateral=ValVar.from_hdf5(group['absAccLateral'], validate=validate), - azimuth=ValVar.from_hdf5(group['azimuth'], validate=validate), object_classification=ObjectClassification.from_hdf5(group['objectClassification'], validate=validate), ) return self @@ -211,11 +208,9 @@ def to_hdf5(self, group: Group): self.width.to_hdf5(group.create_group('width')) self.height.to_hdf5(group.create_group('height')) self.length.to_hdf5(group.create_group('length')) - self.size2d.to_hdf5(group.create_group('size2d')) - self.size3d.to_hdf5(group.create_group('size3d')) - self.rcs.to_hdf5(group.create_group('rcs')) - self.age.to_hdf5(group.create_group('age')) - self.tracking_point.to_hdf5(group.create_group('trackingPoint')) + group.create_dataset('rcs', data=self.rcs) + group.create_dataset('age', data=self.age) + group.create_dataset('trackingPoint', data=self.tracking_point) group.create_dataset('confidenceOfExistence', data=self.confidence_of_existence) group.create_dataset('movementClassification', data=self.movement_classification) group.create_dataset('measState', data=self.meas_state) @@ -231,5 +226,4 @@ def to_hdf5(self, group: Group): self.rel_acc_lateral.to_hdf5(group.create_group('relAccLateral')) self.abs_acc_longitudinal.to_hdf5(group.create_group('absAccLongitudinal')) self.abs_acc_lateral.to_hdf5(group.create_group('absAccLateral')) - self.azimuth.to_hdf5(group.create_group('azimuth')) self.object_classification.to_hdf5(group.create_group('objectClassification')) diff --git a/omega_format/perception/sensor.py b/omega_format/perception/sensor.py index 700987f..6b22762 100644 --- a/omega_format/perception/sensor.py +++ b/omega_format/perception/sensor.py @@ -12,8 +12,10 @@ class Config(PydanticConfig): pass id: int = 0 sensor_modality: PerceptionTypes.SensorModality = PerceptionTypes.SensorModality.LIDAR + fusion_information: str = "" sensor_name: str = "" firmware_version: str = "" + original_updaterate: float = 0. sensor_pos_longitudinal: float = 0. sensor_pos_lateral: float = 0. sensor_pos_z: float = 0. @@ -25,18 +27,24 @@ class Config(PydanticConfig): min_range: confloat(ge=0) = 0. fov_vertical: confloat(ge=0) = 0. fov_horizontal: confloat(ge=0) = 0. - angle_resolution: confloat(ge=0) = 0. - horizontal_resolution: confloat(ge=0) = 0. + max_velocity: confloat(ge=0) = 0. + min_velocity: float = 0. + angle_resolution_vertical: confloat(ge=0) = 0. + angle_resolution_horizontal: confloat(ge=0) = 0. + range_resolution: confloat(ge=0) = 0. vertical_resolution: confloat(ge=0) = 0. velocity_resolution: confloat(ge=0) = 0. - angle_accuracy: confloat(ge=0, le=1) = 0. - vertical_accuracy: confloat(ge=0, le=1) = 0. - horizontal_accuracy: confloat(ge=0, le=1) = 0. - velocity_accuracy: confloat(ge=0, le=1) = 0. - angle_precision: confloat(ge=0, le=1) = 0. - horizontal_precision: confloat(ge=0, le=1) = 0. - vertical_precision: confloat(ge=0, le=1) = 0. - velocity_precision: confloat(ge=0, le=1) = 0. + angle_accuracy: confloat(ge=0) = 0. + vertical_accuracy: confloat(ge=0) = 0. + range_accuracy: confloat(ge=0) = 0. + velocity_accuracy: confloat(ge=0) = 0. + angle_precision: confloat(ge=0) = 0. + range_precision: confloat(ge=0) = 0. + vertical_precision: confloat(ge=0) = 0. + velocity_precision: confloat(ge=0) = 0. + track_confirmation_latency: confloat(ge=0) = 0. + track_drop_latency: confloat(ge=0) = 0. + max_object_tracks: confloat(ge=0) = 0. 
@validator('sensor_heading', 'sensor_pitch', 'sensor_roll') def check_angle(cls, v): @@ -50,8 +58,10 @@ def from_hdf5(cls, group: Group, validate: bool = True): self = func( id=int(sub_group_name), sensor_modality=PerceptionTypes.SensorModality(group.attrs['sensorModality']), + fusion_information=group.attrs['fusionInformation'], sensor_name=group.attrs['sensorName'], firmware_version=group.attrs['firmwareVersion'], + original_updaterate=group.attrs['originalUpdaterate'], sensor_pos_longitudinal=group.attrs['sensorPosLongitudinal'], sensor_pos_lateral=group.attrs['sensorPosLateral'], sensor_pos_z=group.attrs['sensorPosZ'], @@ -63,25 +73,33 @@ def from_hdf5(cls, group: Group, validate: bool = True): min_range=group.attrs['minRange'], fov_vertical=group.attrs['foVVertical'], fov_horizontal=group.attrs['foVHorizontal'], - angle_resolution=group.attrs['angleResolution'], - horizontal_resolution=group.attrs['horizontalResolution'], + max_velocity=group.attrs['maxVelocity'], + min_velocity=group.attrs['minVelocity'], + angle_resolution_vertical=group.attrs['angleResolutionVertical'], + angle_resolution_horizontal=group.attrs['angleResolutionHorizontal'], + range_resolution=group.attrs['rangeResolution'], vertical_resolution=group.attrs['verticalResolution'], velocity_resolution=group.attrs['velocityResolution'], angle_accuracy=group.attrs['angleAccuracy'], vertical_accuracy=group.attrs['verticalAccuracy'], - horizontal_accuracy=group.attrs['horizontalAccuracy'], + range_accuracy=group.attrs['rangeAccuracy'], velocity_accuracy=group.attrs['velocityAccuracy'], angle_precision=group.attrs['anglePrecision'], - horizontal_precision=group.attrs['horizontalPrecision'], + range_precision=group.attrs['rangePrecision'], vertical_precision=group.attrs['verticalPrecision'], velocity_precision=group.attrs['velocityPrecision'], + track_confirmation_latency=group.attrs['trackConfirmationLatency'], + track_drop_latency=group.attrs['trackDropLatency'], + max_object_tracks=group.attrs['maxObjectTracks'] ) return self def to_hdf5(self, group: Group): group.attrs.create('sensorModality', data=self.sensor_modality) + group.attrs.create('fusionInformation', data=self.fusion_information) group.attrs.create('sensorName', data=self.sensor_name) group.attrs.create('firmwareVersion', data=self.firmware_version) + group.attrs.create('originalUpdaterate', data=self.original_updaterate) group.attrs.create('sensorPosLongitudinal', data=self.sensor_pos_longitudinal) group.attrs.create('sensorPosLateral', data=self.sensor_pos_lateral) group.attrs.create('sensorPosZ', data=self.sensor_pos_z) @@ -93,15 +111,21 @@ def to_hdf5(self, group: Group): group.attrs.create('minRange', data=self.min_range) group.attrs.create('foVVertical', data=self.fov_vertical) group.attrs.create('foVHorizontal', data=self.fov_horizontal) - group.attrs.create('angleResolution', data=self.angle_resolution) - group.attrs.create('horizontalResolution', data=self.horizontal_resolution) + group.attrs.create('maxVelocity', data=self.max_velocity) + group.attrs.create('minVelocity', data=self.min_velocity) + group.attrs.create('angleResolutionVertical', data=self.angle_resolution_vertical) + group.attrs.create('angleResolutionHorizontal', data=self.angle_resolution_horizontal) + group.attrs.create('rangeResolution', data=self.range_resolution) group.attrs.create('verticalResolution', data=self.vertical_resolution) group.attrs.create('velocityResolution', data=self.velocity_resolution) group.attrs.create('angleAccuracy', data=self.angle_accuracy)
group.attrs.create('verticalAccuracy', data=self.vertical_accuracy) - group.attrs.create('horizontalAccuracy', data=self.horizontal_accuracy) + group.attrs.create('rangeAccuracy', data=self.range_accuracy) group.attrs.create('velocityAccuracy', data=self.velocity_accuracy) group.attrs.create('anglePrecision', data=self.angle_precision) - group.attrs.create('horizontalPrecision', data=self.horizontal_precision) + group.attrs.create('rangePrecision', data=self.range_precision) group.attrs.create('verticalPrecision', data=self.vertical_precision) group.attrs.create('velocityPrecision', data=self.velocity_precision) + group.attrs.create('trackConfirmationLatency', data=self.track_confirmation_latency) + group.attrs.create('trackDropLatency', data=self.track_drop_latency) + group.attrs.create('maxObjectTracks', data=self.max_object_tracks) diff --git a/omega_format/perception/valvar.py b/omega_format/perception/valvar.py index 02fd3d5..c054b9a 100644 --- a/omega_format/perception/valvar.py +++ b/omega_format/perception/valvar.py @@ -20,8 +20,9 @@ def check_array_length(cls, v, values): length = len(values.get('val')) if len(v) != length: - raise ValueError( - f'length of var array must match with val array, expected len {len(v)}, actual len {length}') + warn('length of var does not match length of val. This is only possible if var is of type not_provided') + # raise ValueError( + # f'length of var array must match with val array, expected len {len(v)}, actual len {length}') return v @classmethod diff --git a/omega_format/perception_recording.py b/omega_format/perception_recording.py index ef67dab..91bd201 100644 --- a/omega_format/perception_recording.py +++ b/omega_format/perception_recording.py @@ -21,12 +21,13 @@ class PerceptionRecording(BaseModel): """ class Config(PydanticConfig): pass - format_version: str = "1.2" + format_version: str = "1.3" converter_version: str = "" recorder_number: int = 0 recording_number: int = 0 ego_id: int = 0 ego_offset: float = 0. + custom_information: str = "" timestamps: Timestamps = Field(default_factory=Timestamps) meta_object: MetaObject = Field(default_factory=MetaObject) @@ -48,6 +49,7 @@ def from_hdf5(cls, filename: Union[str, Path], validate: bool = True): recording_number=file.attrs['recordingNumber'], ego_id=int(file.attrs['egoID']), ego_offset=file.attrs['egoOffset'], + custom_information=file.attrs['customInformation'], timestamps=tfunc(val=file['timestamps'][()]), meta_object=MetaObject.from_hdf5(file['object'], validate=validate), ego_position=EgoPosition.from_hdf5(file['egoPosition'], validate=validate), @@ -67,6 +69,7 @@ def to_hdf5(self, filename): file.attrs.create('recordingNumber', data=self.recording_number) file.attrs.create('egoID', data=self.ego_id) file.attrs.create('egoOffset', data=self.ego_offset) + file.attrs.create('customInformation', data=self.custom_information) file.create_dataset('timestamps', data=self.timestamps.val) diff --git a/omega_format/reference_recording.py b/omega_format/reference_recording.py index 9a41254..f0c04aa 100644 --- a/omega_format/reference_recording.py +++ b/omega_format/reference_recording.py @@ -25,7 +25,7 @@ class ReferenceRecording(InputClassBase): Class that represents the OMEGA Format Reference Recording in an object-oriented manner.
""" meta_data: MetaData = Field(default_factory=MetaData) - timestamps: Timestamps = None + timestamps: Timestamps = Field(default_factory=Timestamps) ego_id: Optional[int] = None ego_vehicle: Optional[RoadUser] = None weather: Weather = None @@ -52,6 +52,8 @@ def from_hdf5(cls, filename: Union[str, Path], validate: bool = True): ) self.ego_id = cls.extract_ego_id(road_users=self.road_users) + #if self.ego_id is not None and self.ego_vehicle is None: + # self.ego_vehicle = self.road_users.pop(self.ego_id) self.resolve() return self diff --git a/omega_format/road/boundary.py b/omega_format/road/boundary.py index 6b98e4a..540fd2a 100644 --- a/omega_format/road/boundary.py +++ b/omega_format/road/boundary.py @@ -7,13 +7,13 @@ class Boundary(InputClassBase): - color: ReferenceTypes.BoundaryColor - condition: ReferenceTypes.BoundaryCondition - poly_index_start: conint(ge=0) - poly_index_end: conint(ge=0) - type: ReferenceTypes.BoundaryType - sub_type: ReferenceTypes.BoundarySubType - is_right_boundary: bool + color: ReferenceTypes.BoundaryColor = None + condition: ReferenceTypes.BoundaryCondition = None + poly_index_start: conint(ge=0) = 0 + poly_index_end: conint(ge=0) = 0 + type: ReferenceTypes.BoundaryType = None + sub_type: ReferenceTypes.BoundarySubType = None + is_right_boundary: bool = None overridden_by: ReferenceDict = Field(default_factory=lambda: ReferenceDict([], Boundary)) overrides: ReferenceDict = Field(default_factory=lambda: ReferenceDict([], Boundary)) height: confloat(ge=0) = 0 diff --git a/omega_format/road/lane.py b/omega_format/road/lane.py index c39a980..d1d0d91 100644 --- a/omega_format/road/lane.py +++ b/omega_format/road/lane.py @@ -12,10 +12,10 @@ class Lane(InputClassBase): - border_right: ReferenceElement - border_left: ReferenceElement - type: ReferenceTypes.LaneType - sub_type: ReferenceTypes.LaneSubType + border_right: ReferenceElement = None + border_left: ReferenceElement = None + type: ReferenceTypes.LaneType = None + sub_type: ReferenceTypes.LaneSubType = None boundaries: DictWithProperties = Field(default_factory=DictWithProperties) predecessors: ReferenceDict = Field(default_factory=lambda: ReferenceDict([], Lane)) successors: ReferenceDict = Field(default_factory=lambda: ReferenceDict([], Lane)) diff --git a/omega_format/road/road.py b/omega_format/road/road.py index 9e4d24d..211ba6f 100644 --- a/omega_format/road/road.py +++ b/omega_format/road/road.py @@ -12,7 +12,8 @@ class Road(InputClassBase): - location: ReferenceTypes.RoadLocation + location: ReferenceTypes.RoadLocation = None + num_lanes: int = None lateral_markings: DictWithProperties = Field(default_factory=DictWithProperties) lanes: DictWithProperties = Field(default_factory=DictWithProperties) borders: DictWithProperties = Field(default_factory=DictWithProperties) @@ -24,6 +25,10 @@ class Road(InputClassBase): def num_lanes(self): return len(self.lanes) + @num_lanes.setter + def num_lanes(self, value): + self._num_lanes = value + @classmethod @raise_not_resolved def resolve_func(cls, input_recording, i): diff --git a/omega_format/timestamps.py b/omega_format/timestamps.py index 4ab0d18..d409bdd 100644 --- a/omega_format/timestamps.py +++ b/omega_format/timestamps.py @@ -4,7 +4,7 @@ class Timestamps(InputClassBase): - val: np.ndarray = np.array([0]) + val: np.ndarray = np.array([], dtype=np.float64)# np.ndarray = np.array([0]) def cut_to_timespan(self, birth, death): self.val = self.val[birth:death+1] diff --git a/omega_format/visualization/modules/base.py 
b/omega_format/visualization/modules/base.py index 3fbbbb9..8d0e83a 100644 --- a/omega_format/visualization/modules/base.py +++ b/omega_format/visualization/modules/base.py @@ -47,6 +47,7 @@ def __init__(self, reference: Optional[ReferenceRecording]=None, perception: Opt elif self.perception is not None: self.timestamps = perception.timestamps.val self.identifier = 'only_perception' + self.convert_perception_coordinates_to_plot_coordinates() else: raise ValueError('Either `reference` or `perception` has to be set') @@ -64,20 +65,30 @@ def create_list(cls, references=None, perceptions=None): else: return [cls(reference=references, perception=perceptions)] + def convert_perception_coordinates_to_plot_coordinates(self): + for obj in self.perception.objects.values(): # type: Object + h = obj.heading.val + x = -obj.dist_lateral.val + y = obj.dist_longitudinal.val + heading = 180 + obj.heading.val += heading/2 + obj.dist_lateral.val = - np.multiply(x, np.cos(np.deg2rad(heading))) + np.multiply(y, np.sin(np.deg2rad(heading))) + obj.dist_longitudinal.val = - np.multiply(x, np.sin(np.deg2rad(heading))) - np.multiply(y, np.cos(np.deg2rad(heading))) + def convert_perception_coordinates_to_reference_coordinates(self): for obj in self.perception.objects.values(): # type: Object death = obj.birth_stamp+len(obj.heading.val) - obj.dist_lateral.val -= self.perception.ego_offset # adjust ego offset + obj.dist_longitudinal.val += self.perception.ego_offset # adjust ego offset ego_h = self.reference.ego_vehicle.tr.heading[obj.birth_stamp:death] ego_x = self.reference.ego_vehicle.tr.pos_x[obj.birth_stamp:death] ego_y = self.reference.ego_vehicle.tr.pos_y[obj.birth_stamp:death] - h = obj.heading.val - x = obj.dist_lateral.val + x = -obj.dist_lateral.val y = obj.dist_longitudinal.val - obj.heading.val = h+ego_h - obj.dist_lateral.val = np.multiply(x, np.cos(np.deg2rad(ego_h))) - np.multiply(y, np.sin(np.deg2rad(ego_h)))+ego_x - obj.dist_longitudinal.val = np.multiply(x, np.sin(np.deg2rad(ego_h))) + np.multiply(y, np.cos(np.deg2rad(ego_h)))+ego_y + heading = ego_h + 270 + obj.dist_lateral.val = np.multiply(x, np.cos(np.deg2rad(heading))) - np.multiply(y, np.sin(np.deg2rad(heading)))+ego_x + obj.dist_longitudinal.val = np.multiply(x, np.sin(np.deg2rad(heading))) + np.multiply(y, np.cos(np.deg2rad(heading)))+ego_y + obj.heading.val += ego_h def adjust_perception_object_birth_stamps(self): ego_time_offset = self.reference.ego_vehicle.birth diff --git a/omega_format/visualization/modules/perc_sensors.py b/omega_format/visualization/modules/perc_sensors.py index 58e2289..a6ec960 100644 --- a/omega_format/visualization/modules/perc_sensors.py +++ b/omega_format/visualization/modules/perc_sensors.py @@ -18,6 +18,7 @@ def visualize_dynamics(self, snip, timestamp: Timestamps, visualizer): return [] items = [] + ego_offset = snip.perception.ego_offset if snip.identifier != 'only_perception': ego_obj = snip.reference.ego_vehicle @@ -29,18 +30,19 @@ def visualize_dynamics(self, snip, timestamp: Timestamps, visualizer): for id_, sensor in snip.perception.sensors.items(): # type: int, Sensor offset_x = sensor.sensor_pos_lateral - offset_y = sensor.sensor_pos_longitudinal + offset_y = sensor.sensor_pos_longitudinal + ego_offset heading = sensor.sensor_heading dist_min = sensor.min_range dist_max = sensor.max_range + diameter = dist_max * 2 fov_horizontal = sensor.fov_horizontal fov_vertical = sensor.fov_vertical - start_angle = -fov_horizontal - span_angle = fov_horizontal * 2 + start_angle = -fov_horizontal/2 + span_angle = 
fov_horizontal - artist = QtWidgets.QGraphicsEllipseItem(-dist_max / 2, -dist_max / 2, dist_max, dist_max) + artist = QtWidgets.QGraphicsEllipseItem(-diameter / 2, -diameter / 2, diameter, diameter) artist.setStartAngle(start_angle * 16) artist.setSpanAngle(span_angle * 16) @@ -54,14 +56,14 @@ def visualize_dynamics(self, snip, timestamp: Timestamps, visualizer): center_point = artist.boundingRect().center() artist.translate(-center_point.x(), -center_point.y()) if snip.identifier != 'only_perception': - artist.setRotation(heading + ego_h); + artist.setRotation(heading + ego_h) else: - artist.setRotation(heading); + artist.setRotation(heading + 90) artist.translate(center_point.x(), center_point.y()) if snip.identifier != 'only_perception': - new_x = np.multiply(offset_x, np.cos(np.deg2rad(ego_h))) - np.multiply(offset_y, np.sin(np.deg2rad(ego_h))) + ego_x - new_y = np.multiply(offset_x, np.sin(np.deg2rad(ego_h))) + np.multiply(offset_y, np.cos(np.deg2rad(ego_h))) + ego_y + new_x = np.multiply(offset_y, np.cos(np.deg2rad(ego_h))) - np.multiply(offset_x, np.sin(np.deg2rad(ego_h))) + ego_x + new_y = np.multiply(offset_y, np.sin(np.deg2rad(ego_h))) + np.multiply(offset_x, np.cos(np.deg2rad(ego_h))) + ego_y artist.translate(new_x, new_y) else: artist.translate(offset_x, offset_y) diff --git a/omega_format/visualization/pyqt_helper.py b/omega_format/visualization/pyqt_helper.py index e4f4225..4b81ba5 100644 --- a/omega_format/visualization/pyqt_helper.py +++ b/omega_format/visualization/pyqt_helper.py @@ -246,9 +246,11 @@ def tp_items(tp, text, timestamp, pen, brush, color): if tp.bb.length == 0 or tp.bb.width == 0: if tp.type is ReferenceTypes.RoadUserType.PEDESTRIAN: - tp.bb = BoundingBox(DefaultValues.pedestrian) + tp.bb = BoundingBox() + tp.bb.vec = DefaultValues.pedestrian elif tp.type is ReferenceTypes.RoadUserType.BICYCLE: - tp.bb = BoundingBox(DefaultValues.bicycle) + tp.bb = BoundingBox() + tp.bb.vec = DefaultValues.bicycle length = tp.bb.length width = tp.bb.width diff --git a/omega_format/visualization/visualizer.py b/omega_format/visualization/visualizer.py index 3243da2..0838d4b 100644 --- a/omega_format/visualization/visualizer.py +++ b/omega_format/visualization/visualizer.py @@ -110,8 +110,12 @@ def visualize_previous_frame(self, pause=False): self.visualize() def elapsed_time(self, index): + # if no time information + if len(self.snippet.timestamps) == 0: + return 0 return (self.snippet.timestamps[index] - self.snippet.timestamps[0]) + def visualize(self): """ Main Visualization function diff --git a/omega_format/weather/cloudiness.py b/omega_format/weather/cloudiness.py index 3ddf95e..0805f3d 100644 --- a/omega_format/weather/cloudiness.py +++ b/omega_format/weather/cloudiness.py @@ -33,4 +33,4 @@ def to_hdf5(self, group: Group): @property def is_cloudy(self): degree = np.mean(self.degree) - return degree >= 3 + return degree >= 2 diff --git a/omega_format/weather/wind.py b/omega_format/weather/wind.py index 25a929f..d385ec1 100644 --- a/omega_format/weather/wind.py +++ b/omega_format/weather/wind.py @@ -49,7 +49,8 @@ def to_hdf5(self, group: Group): @property def is_windy(self): most_frequent_type = max(set(self.type), key=self.type.count, default=None) - return most_frequent_type in [ReferenceTypes.Wind.MODERATE_BREEZE, + return most_frequent_type in [ReferenceTypes.Wind.GENTLE_BREEZE, + ReferenceTypes.Wind.MODERATE_BREEZE, ReferenceTypes.Wind.FRESH_BREEZE, ReferenceTypes.Wind.STRONG_BREEZE, ReferenceTypes.Wind.NEAR_GALE,