Corrections to support classes without instances
Andrevmatias committed Mar 3, 2021
1 parent 2bddd97 commit c6f03c2
Showing 4 changed files with 51 additions and 18 deletions.
lapixdl/evaluation/evaluate.py: 15 changes (9 additions, 6 deletions)
@@ -101,7 +101,8 @@ def evaluate_detection(gt_bboxes: Iterable[List[BBox]],
confusion_matrix = np.zeros((classes_count + 1, classes_count + 1), np.int)
undetected_idx = classes_count
# Tracks detection scores to calculate the Precision x Recall curve and the Average Precision metric
- predictions_by_class: List[List[PredictionResult]] = [[] for i in range(len(classes))]
+ predictions_by_class: List[List[PredictionResult]] = [
+     [] for i in range(len(classes))]

# For each image GT and predicted bbox set
for (curr_gt_bboxes, curr_pred_bboxes) in tqdm(zip(gt_bboxes, pred_bboxes), unit=' samples'):
@@ -124,17 +125,17 @@ def evaluate_detection(gt_bboxes: Iterable[List[BBox]],
# Only the max IoU is considered TP, the others are FPs
max_iou = gt_ious[max_iou_idx]

- if max_iou < iou_threshold: # FN - GT bbox not detected
+ if max_iou < iou_threshold:  # FN - GT bbox not detected
confusion_matrix[undetected_idx, gt_cls_idx] += 1
- else: # TP - GT bbox detected
-     no_hit_idxs.remove(max_iou_idx) # Remove from FPs
+ else:  # TP - GT bbox detected
+     no_hit_idxs.remove(max_iou_idx)  # Remove from FPs

pred_cls_idx = curr_pred_bboxes[max_iou_idx].cls
confusion_matrix[pred_cls_idx, gt_cls_idx] += 1
predictions_by_class[pred_cls_idx]\
.append(PredictionResult(curr_pred_bboxes[max_iou_idx].score, PredictionResultType.TP))

- for no_hit_idx in no_hit_idxs: # FPs - Predictions that do not match any GT
+ for no_hit_idx in no_hit_idxs:  # FPs - Predictions that do not match any GT
pred_cls_idx = curr_pred_bboxes[no_hit_idx].cls
confusion_matrix[pred_cls_idx, undetected_idx] += 1
predictions_by_class[pred_cls_idx]\
@@ -215,6 +216,8 @@ def calculate_iou_by_class(gt_bboxes: List[BBox],

def __calculate_binary_iou(gt_bboxes: List[BBox],
pred_bboxes: List[BBox]) -> float:
+ if len(gt_bboxes) == 0 or len(pred_bboxes) == 0:
+     return .0

bboxes_btm_right_points = [bbox.bottom_right_point for bbox in gt_bboxes]\
+ [bbox.bottom_right_point for bbox in pred_bboxes]
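The functional change in evaluate.py is the early return added to `__calculate_binary_iou`: a class with no ground-truth or no predicted boxes now gets an IoU of 0.0 instead of an attempt to build a bounding region from an empty list (the rest of the hunks above are whitespace-only formatting). Below is a minimal, self-contained sketch of that guarded pattern; the mask-based IoU and the names `binary_iou`, `to_mask`, `height`, and `width` are illustrative assumptions, not lapixdl's implementation.

```python
import numpy as np


def binary_iou(gt_boxes, pred_boxes, height, width):
    """IoU of two bbox sets rasterized as binary masks (illustrative only)."""
    # Same guard as the one added to __calculate_binary_iou: an empty set
    # of boxes on either side yields an IoU of 0.0 instead of an error.
    if len(gt_boxes) == 0 or len(pred_boxes) == 0:
        return 0.0

    def to_mask(boxes):
        mask = np.zeros((height, width), dtype=bool)
        for (x0, y0, x1, y1) in boxes:  # top-left and bottom-right corners
            mask[y0:y1 + 1, x0:x1 + 1] = True
        return mask

    gt_mask, pred_mask = to_mask(gt_boxes), to_mask(pred_boxes)
    intersection = np.logical_and(gt_mask, pred_mask).sum()
    union = np.logical_or(gt_mask, pred_mask).sum()
    return float(intersection / union)


print(binary_iou([(110, 110, 320, 280)], [], height=400, width=400))  # 0.0
```

With the guard in place, a class that never occurs simply contributes an IoU of 0.0, which the model.py changes below then exclude from the macro averages.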
lapixdl/evaluation/model.py: 40 changes (29 additions, 11 deletions)
@@ -165,6 +165,11 @@ class BinaryClassificationMetrics:
TN: int = 0
FN: int = 0

+ @property
+ def has_instances(self) -> bool:
+     """bool: Indicates if the class has any ground truth or predicted instances."""
+     return self.count > 0
+
@property
def count(self) -> int:
"""int: Total count of classified instances."""
@@ -214,8 +219,9 @@ def show_confusion_matrix(self):
return plot.confusion_matrix(self.confusion_matrix, ['P', 'N'], f'Confusion Matrix for \"{self.cls}\" Class')

def __str__(self):
+ no_instances_string = ' [NO INSTANCES]' if not self.has_instances else ''
return (
- f'{self.cls}:\n'
+ f'{self.cls}{no_instances_string}:\n'
f'\tTP: {self.TP}\n'
f'\tTN: {self.TN}\n'
f'\tFP: {self.FP}\n'
@@ -254,13 +260,20 @@ def __init__(self, classes: List[str], confusion_matrix: List[List[int]] = []):
self._classes = classes

self._by_class = self.__get_by_class_metrics()
+ self._by_class_w_instances = list(filter(
+     lambda c: c.has_instances, self._by_class))
self._count = self._confusion_matrix.sum()

@property
def by_class(self) -> List[BinaryClassificationMetrics]:
"""List[BinaryClassificationMetrics]: Binary metrics calculated for each class index."""
return self._by_class

+ @property
+ def by_class_w_instances(self) -> List[BinaryClassificationMetrics]:
+     """List[BinaryClassificationMetrics]: Binary metrics calculated for each class that has instances."""
+     return self._by_class_w_instances
+
@property
def count(self) -> int:
"""int: Total count of classified instances."""
@@ -274,27 +287,27 @@ def accuracy(self) -> float:
@property
def avg_recall(self) -> float:
"""float: Macro average recall metric."""
- return reduce(lambda acc, curr: curr.recall + acc, self.by_class, .0) / len(self.by_class)
+ return reduce(lambda acc, curr: curr.recall + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances)

@property
def avg_precision(self) -> float:
"""float: Macro average precision metric."""
- return reduce(lambda acc, curr: curr.precision + acc, self.by_class, .0) / len(self.by_class)
+ return reduce(lambda acc, curr: curr.precision + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances)

@property
def avg_specificity(self) -> float:
"""float: Macro average specificity metric."""
- return reduce(lambda acc, curr: curr.specificity + acc, self.by_class, .0) / len(self.by_class)
+ return reduce(lambda acc, curr: curr.specificity + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances)

@property
def avg_f_score(self) -> float:
"""float: Macro average F-Score/Dice metric."""
- return reduce(lambda acc, curr: curr.f_score + acc, self.by_class, .0) / len(self.by_class)
+ return reduce(lambda acc, curr: curr.f_score + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances)

@property
def avg_false_positive_rate(self) -> float:
"""float: Macro average False Positive Rate metric."""
- return reduce(lambda acc, curr: curr.false_positive_rate + acc, self.by_class, .0) / len(self.by_class)
+ return reduce(lambda acc, curr: curr.false_positive_rate + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances)

@property
def confusion_matrix(self) -> List[List[int]]:
@@ -408,16 +421,18 @@ def __init__(self, classes: List[str], confusion_matrix: List[List[int]] = []):
'There should be at least two classes (with background as the first)'
super().__init__(classes, confusion_matrix)
self._by_class = [BinarySegmentationMetrics(x) for x in self.by_class]
+ self._by_class_w_instances = [
+     x for x in self.by_class if x.has_instances]

@property
def avg_iou(self) -> float:
"""float: Macro average IoU/Jaccard Index metric."""
- return reduce(lambda acc, curr: curr.iou + acc, self._by_class, .0) / len(self.by_class)
+ return reduce(lambda acc, curr: curr.iou + acc, self._by_class_w_instances, .0) / len(self._by_class_w_instances)

@property
def avg_iou_no_bkg(self) -> float:
"""float: Macro average IoU/Jaccard Index metric without `background` class (index 0)."""
- return reduce(lambda acc, curr: curr.iou + acc, self._by_class[1:], .0) / (len(self.by_class) - 1)
+ return reduce(lambda acc, curr: curr.iou + acc, self._by_class_w_instances[1:], .0) / (len(self._by_class_w_instances) - 1)

def __str__(self):
return (
@@ -457,7 +472,7 @@ def __init__(self, classification_metrics: BinaryClassificationMetrics,
)
self._iou = iou
self._precision_recall_curve = self.__calculate_precision_recall_curve(
-     predictions)
+     predictions) if self.gt_count > 0 else []

@property
def gt_count(self) -> int:
@@ -480,6 +495,7 @@ def iou(self) -> float:
@property
def precision_recall_curve(self) -> List[Tuple[float, float]]:
"""List[Tuple[float, float]]: Precision x Recall curve as a list of (Recall, Precision) tuples."""
+ assert self.gt_count > 0, "This class does not have instances."
return self._precision_recall_curve

def average_precision(self, interpolation_points: Optional[int] = None) -> float:
@@ -588,14 +604,16 @@ def __init__(self, classes: List[str],
by_class_wo_undetected = self.by_class[slice(0, -1)]
self._by_class = [BinaryDetectionMetrics(by_class, iou, predictions)
for by_class, iou, predictions in zip(by_class_wo_undetected, iou_by_class, predictions_by_class)]
+ self._by_class_w_instances = [
+     x for x in self.by_class if x.has_instances]

@property
def avg_iou(self):
"""float: Macro average IoU/Jaccard Index metric.
This metric is calculated as for segmentation, considering bboxes as pixel masks.
"""
- return reduce(lambda acc, curr: curr.iou + acc, self.by_class, .0) / len(self.by_class)
+ return reduce(lambda acc, curr: curr.iou + acc, self._by_class_w_instances, .0) / len(self._by_class_w_instances)

def mean_average_precision(self, interpolation_points: Optional[int] = None) -> float:
"""Calculates the mean of Average Precision metrics of all classes.
@@ -607,7 +625,7 @@ def mean_average_precision(self, interpolation_points: Optional[int] = None) ->
Returns:
float: Mean Average Precision metric.
"""
- return reduce(lambda acc, curr: curr.average_precision(interpolation_points) + acc, self._by_class, .0) / len(self._by_class)
+ return reduce(lambda acc, curr: curr.average_precision(interpolation_points) + acc, self._by_class_w_instances, .0) / len(self._by_class_w_instances)

def __str__(self):
return (
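Taken together, the model.py changes redefine the macro-averaged metrics (avg_recall, avg_precision, avg_iou, mean average precision, and so on) over `by_class_w_instances` rather than `by_class`, so classes that never appear in the ground truth or predictions no longer drag the averages toward zero. A rough sketch of the idea follows, using a toy stand-in for BinaryClassificationMetrics; the dataclass, field names, and recall formula here are illustrative assumptions, not lapixdl's implementation.

```python
from dataclasses import dataclass
from typing import List


@dataclass
class ToyClassMetrics:
    """Illustrative stand-in for BinaryClassificationMetrics (not lapixdl's class)."""
    name: str
    TP: int = 0
    FP: int = 0
    FN: int = 0

    @property
    def has_instances(self) -> bool:
        # Any ground-truth (TP + FN) or predicted (TP + FP) instance;
        # lapixdl derives the same notion from its `count` property.
        return (self.TP + self.FP + self.FN) > 0

    @property
    def recall(self) -> float:
        gt = self.TP + self.FN
        return self.TP / gt if gt > 0 else 0.0


per_class: List[ToyClassMetrics] = [
    ToyClassMetrics('kite', TP=8, FN=2),
    ToyClassMetrics('person'),  # never annotated and never predicted
]

# Old behavior: average over every class -> (0.8 + 0.0) / 2 = 0.4
# New behavior: average only over classes with instances -> 0.8
with_instances = [c for c in per_class if c.has_instances]
avg_recall = sum(c.recall for c in with_instances) / len(with_instances)
print(avg_recall)  # 0.8
```

The ' [NO INSTANCES]' tag added to `__str__` keeps such classes visible in per-class reports even though they are excluded from the averages.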
setup.py: 2 changes (1 addition, 1 deletion)
@@ -6,7 +6,7 @@
setup(
name='lapixdl',
packages=find_packages(exclude=['tests']),
- version='0.7.2',
+ version='0.7.3',
description='Utils for Computer Vision Deep Learning research',

long_description=long_description,
tests/evaluation/evaluate/test_evaluate_detection.py: 12 changes (12 additions, 0 deletions)
@@ -17,3 +17,15 @@ def test_evaluation_detection_iou_metric():

assert round(metrics.avg_iou, 3) == .290
assert round(metrics.by_class[0].iou, 3) == .290

+ def test_evaluation_detection_iou_metric_w_more_classes():
+     classes = ['kite', 'person']
+
+     gt_bbox = BBox(110, 110, 320, 280, 0)
+
+     pred_bbox = BBox(70, 50, 240, 220, 0)
+
+     metrics = evaluate_detection([[gt_bbox]], [[pred_bbox]], classes)
+
+     assert round(metrics.avg_iou, 3) == .290
+     assert round(metrics.by_class[0].iou, 3) == .290
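As a hedged follow-up (not part of the commit), the accessors introduced in model.py could be exercised on the same fixture. The import paths and the assertions below are assumptions about the resulting metrics object, not tests shipped with the library.

```python
from lapixdl.evaluation.evaluate import evaluate_detection
from lapixdl.evaluation.model import BBox

classes = ['kite', 'person']           # class index 1 never occurs
gt_bbox = BBox(110, 110, 320, 280, 0)
pred_bbox = BBox(70, 50, 240, 220, 0)

metrics = evaluate_detection([[gt_bbox]], [[pred_bbox]], classes)

# Only classes with ground-truth or predicted instances should remain here.
assert len(metrics.by_class_w_instances) == 1

# The instance-less 'person' class has no Precision x Recall curve; the new
# assert in `precision_recall_curve` should surface that explicitly instead
# of failing during construction.
try:
    _ = metrics.by_class[1].precision_recall_curve
except AssertionError:
    print("'person' has no instances, so no curve is available")
```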
