diff --git a/examples.py b/examples.py
index 7bf9a44..57ea7b6 100644
--- a/examples.py
+++ b/examples.py
@@ -3,20 +3,18 @@
 Requires: opencv-python, numpy
-
-TODO:
-    Detection example
 """
 
 import cv2
 import numpy as np
 
-from lapixdl.evaluation.evaluate import evaluate_segmentation, evaluate_classification
+from lapixdl.evaluation.evaluate import evaluate_segmentation, evaluate_classification, evaluate_detection
 from lapixdl.evaluation.model import BBox, Classification
 
 
 def main():
     # Model evaluation examples
     evaluate_segmentation_example()
+    evaluate_detection_example()
     evaluate_classification_example()
 
 
@@ -73,6 +71,34 @@ def evaluate_segmentation_example():
     # Shows confusion matrix for class `a`
     metrics.by_class[0].show_confusion_matrix()
 
 
+def evaluate_detection_example():
+    # Class names
+    classes = ['kite', 'person']
+
+    # Image shape
+    image_shape = (480, 640)
+
+    # Creating fake data
+    gt_bbox_1 = BBox(10, 10, 10, 10, 0)
+    pred_bbox_1 = BBox(10, 10, 10, 10, 0)
+
+    gt_bbox_2 = BBox(110, 110, 320, 280, 1)
+    pred_bbox_2 = BBox(70, 50, 240, 220, 1)
+
+    # Creating data supplier iterators
+    # They are not necessary here, but are useful if you want to yield data
+    # from disk, e.g. from a PyTorch DataLoader
+    it_gt_bboxes = identity_iterator([gt_bbox_1, gt_bbox_2])
+    it_pred_bboxes = identity_iterator([pred_bbox_1, pred_bbox_2])
+
+    # Calculates and shows metrics
+    metrics = evaluate_detection(it_gt_bboxes, it_pred_bboxes, classes)
+
+    # Shows confusion matrix and returns its Figure and Axes
+    fig, axes = metrics.show_confusion_matrix()
+
+    # Shows confusion matrix for class `kite`
+    metrics.by_class[0].show_confusion_matrix()
+
+
 def evaluate_classification_example():
     # Class names
diff --git a/lapixdl/evaluation/evaluate.py b/lapixdl/evaluation/evaluate.py
index 3858fb5..8e3f203 100644
--- a/lapixdl/evaluation/evaluate.py
+++ b/lapixdl/evaluation/evaluate.py
@@ -90,7 +90,8 @@ def __merge_detection_metrics(acc: DetectionMetrics,
 def evaluate_detection(gt_bboxes: Iterable[List[BBox]],
                        pred_bboxes: Iterable[List[BBox]],
                        classes: List[str],
-                       iou_threshold: float = .5) -> DetectionMetrics:
+                       iou_threshold: float = .5,
+                       undetected_cls_name: str = "_undetected_") -> DetectionMetrics:
     """Evaluates detection predictions
 
     The iterables should return the bboxes of one image per iteration
@@ -102,19 +103,37 @@ def evaluate_detection(gt_bboxes: Iterable[List[BBox]],
         pred_bboxes (Iterable[List[BBox]]): Predicted classifications.
         classes (List[str]): Class names.
         iou_threshold (float): Minimum IoU threshold to consider a detection as a True Positive.
+        undetected_cls_name (str): Name to be used for the undetected instances class. Defaults to "_undetected_".
 
     Returns:
         DetectionMetrics: Detection metrics.
""" - confusion_matrix = np.zeros((len(classes), len(classes)), np.int) + confusion_matrix = np.zeros((len(classes) + 1, len(classes) + 1), np.int) + undetected_idx = len(classes) for (curr_gt_bboxes, curr_pred_bboxes) in tqdm(zip(gt_bboxes, pred_bboxes), unit=' samples'): pairwise_bbox_ious = calculate_pairwise_bbox_ious( curr_gt_bboxes, curr_pred_bboxes) - #TODO: Evaluate IoU results - metrics = DetectionMetrics(classes, confusion_matrix) + no_hit_idxs = set(range(0, len(curr_pred_bboxes))) + + for i, gt_ious in enumerate(pairwise_bbox_ious): + gt_cls_idx = curr_gt_bboxes[i].cls + + hits_idxs = [i for i, iou in enumerate(gt_ious) if iou >= iou_threshold] + no_hit_idxs.difference_update(hits_idxs) + + if len(hits_idxs) == 0: + confusion_matrix[undetected_idx, gt_cls_idx] += 1 + else: + for hit_idx in hits_idxs: + confusion_matrix[curr_pred_bboxes[hit_idx].cls, gt_cls_idx] += 1 + + for no_hit_idx in no_hit_idxs: + confusion_matrix[curr_pred_bboxes[no_hit_idx].cls, undetected_idx] += 1 + + metrics = DetectionMetrics(classes + [undetected_cls_name], confusion_matrix) print(metrics) @@ -152,7 +171,7 @@ def calculate_bbox_iou(bbox_a: BBox, bbox_b: BBox) -> float: Returns: float: IoU between the two bboxes. """ - + # Gets each box upper left and bottom right coordinates (upr_lft_x_a, upr_lft_y_a) = bbox_a.upper_left_point (btm_rgt_x_a, btm_rgt_y_a) = bbox_a.bottom_right_point diff --git a/lapixdl/evaluation/model.py b/lapixdl/evaluation/model.py index bf82b9a..0856673 100644 --- a/lapixdl/evaluation/model.py +++ b/lapixdl/evaluation/model.py @@ -391,16 +391,17 @@ def avg_precision(self): class DetectionMetrics(ClassificationMetrics): - """Multiclass instance-based classification metrics + """Multiclass detection metrics Attributes: classes (List[str]): Class names. - confusion_matrix (List[List[int]]): Confusion matrix of all the classes. + confusion_matrix (List[List[int]]): Confusion matrix of all the classes. + The last colunm and line must correspond to the "undetected" class. """ def __init__(self, classes: List[str], confusion_matrix: List[List[int]] = []): super().__init__(classes, confusion_matrix) - self._by_class = [BinarySegmentationMetrics(x) for x in self.by_class] + self._by_class = [BinaryDetectionMetrics(x) for x in self.by_class] @property def avg_iou(self): diff --git a/tests/evaluation/evaluate/test_evaluate_detection.py b/tests/evaluation/evaluate/test_evaluate_detection.py new file mode 100644 index 0000000..cd638d3 --- /dev/null +++ b/tests/evaluation/evaluate/test_evaluate_detection.py @@ -0,0 +1,11 @@ +import pytest +import numpy as np + +from lapixdl.evaluation.evaluate import evaluate_segmentation +from lapixdl.evaluation.model import BBox +from ... import utils + + +def test_evaluation_detection_metrics(): + #TODO: Implement detection metrics tests + pass