From b15f8bbf36ff108f02fc594b2267d6dadb0be059 Mon Sep 17 00:00:00 2001 From: "Joao G.A. Amorim" Date: Sat, 18 Dec 2021 19:15:54 -0300 Subject: [PATCH 1/9] add cached property at model --- lapixdl/evaluation/model.py | 62 +++++++++---------- .../model/test_BinaryClassificationMetrics.py | 51 +++++---------- 2 files changed, 45 insertions(+), 68 deletions(-) diff --git a/lapixdl/evaluation/model.py b/lapixdl/evaluation/model.py index 6131ccc..d36fbbd 100644 --- a/lapixdl/evaluation/model.py +++ b/lapixdl/evaluation/model.py @@ -2,7 +2,7 @@ from typing import Optional, Union, List, Tuple, Generic, TypeVar from enum import Enum from dataclasses import dataclass -from functools import reduce +from functools import cached_property, reduce import math import numpy as np @@ -75,22 +75,22 @@ class BBox: cls: int score: Optional[float] = None - @property + @cached_property def upper_left_point(self) -> Tuple[int, int]: """Tuple[int, int]: (X,Y) of the upper left point of the Bounding Box.""" return (self.upper_left_x, self.upper_left_y) - @property + @cached_property def bottom_right_point(self) -> Tuple[int, int]: """Tuple[int, int]: (X,Y) of the bottom right point of the Bounding Box.""" return (self.upper_left_x + self.width - 1, self.upper_left_y + self.height - 1) - @property + @cached_property def center_point(self) -> Tuple[int, int]: """Tuple[int, int]: (X,Y) of the center point of the Bounding Box.""" return ((self.upper_left_x + self.width - 1) // 2, (self.upper_left_y + self.height - 1) // 2) - @property + @cached_property def area(self) -> int: """int: Area of the Bounding Box.""" return self.width * self.height @@ -174,38 +174,38 @@ class BinaryClassificationMetrics: TN: int = 0 FN: int = 0 - @property + @cached_property def has_instances(self) -> bool: """int: Indicates if the class has any ground truth or predicted instances.""" return self.count > 0 - @property + @cached_property def count(self) -> int: """int: Total count of classified instances.""" return self.TP + self.TN + self.FP + self.FN - @property + @cached_property def accuracy(self) -> float: """int: Total count of classified instances.""" if self.count == 0: return math.nan return (self.TP + self.TN)/self.count - @property + @cached_property def recall(self) -> float: """float: Recall metric - TP / (TP + FN).""" if self.TP == 0 and self.FN == 0: return math.nan return self.TP/(self.TP + self.FN) - @property + @cached_property def false_positive_rate(self) -> float: """float: False Positive Rate (FPR) metric - FP / (FP + TN).""" if self.FP == 0 and self.TN == 0: return math.nan return self.FP/(self.FP + self.TN) - @property + @cached_property def specificity(self) -> float: """float: Specificity metric - TN / (FP + TN).""" if self.FP == 0 and self.TN == 0: @@ -213,7 +213,7 @@ def specificity(self) -> float: return self.TN/(self.FP + self.TN) - @property + @cached_property def precision(self) -> float: """float: Precision metric - TP / (FP + TP).""" if self.FP == 0 and self.FN == 0: # No GT instances @@ -222,7 +222,7 @@ def precision(self) -> float: return math.nan return self.TP/(self.FP + self.TP) - @property + @cached_property def f_score(self) -> float: """float: F-Score/Dice metric - 2*TP / (FP + FN + 2*TP).""" quotient = (self.FP + self.FN + 2*self.TP) @@ -232,7 +232,7 @@ def f_score(self) -> float: return math.nan return 2*self.TP/quotient - @property + @cached_property def confusion_matrix(self) -> List[List[int]]: """List[List[int]]: Confusion matrix of all the classes""" return [[self.TP, self.FP], [self.FN, 
self.TN]] @@ -296,22 +296,22 @@ def by_class(self) -> List[BinaryClassificationMetrics]: """List[BinaryClassificationMetrics]: Binary metrics calculated for each class index.""" return self._by_class - @property + @cached_property def by_class_w_instances(self) -> List[BinaryClassificationMetrics]: """List[BinaryClassificationMetrics]: Binary metrics calculated for each class index with instances.""" return self._by_class_w_instances - @property + @cached_property def count(self) -> int: """int: Total count of classified instances.""" return self._count - @property + @cached_property def accuracy(self) -> float: """float: Accuracy metric - correct classifications / count.""" return np.diagonal(self._confusion_matrix).sum() / self.count - @property + @cached_property def avg_recall(self) -> float: """float: Macro average recall metric.""" by_class_w_recall = [ @@ -321,29 +321,29 @@ def avg_recall(self) -> float: return 1 return reduce(lambda acc, curr: curr.recall + acc, by_class_w_recall, .0) / len(by_class_w_recall) - @property + @cached_property def avg_precision(self) -> float: """float: Macro average precision metric.""" return reduce(lambda acc, curr: (0 if math.isnan(curr.precision) else curr.precision) + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances) - @property + @cached_property def avg_specificity(self) -> float: """float: Macro average specificity metric.""" by_class_w_specificity = [ c for c in self.by_class_w_instances if not math.isnan(c.specificity)] return reduce(lambda acc, curr: curr.specificity + acc, by_class_w_specificity, .0) / len(by_class_w_specificity) - @property + @cached_property def avg_f_score(self) -> float: """float: Macro average F-Score/Dice metric.""" return reduce(lambda acc, curr: curr.f_score + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances) - @property + @cached_property def avg_false_positive_rate(self) -> float: """float: Macro average False Positive Rate metric.""" return reduce(lambda acc, curr: curr.false_positive_rate + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances) - @property + @cached_property def confusion_matrix(self) -> List[List[int]]: """List[List[int]]: Confusion matrix of all the classes""" return self._confusion_matrix @@ -411,7 +411,7 @@ def __init__(self, classification_metrics: BinaryClassificationMetrics): FN=classification_metrics.FN ) - @property + @cached_property def iou(self) -> float: """float: IoU/Jaccard Index metric - TP / (FP + FN + TP).""" return self.TP / (self.FP + self.FN + self.TP) @@ -458,12 +458,12 @@ def __init__(self, classes: List[str], confusion_matrix: List[List[int]] = []): self._by_class_w_instances = [ x for x in self.by_class if x.has_instances] - @property + @cached_property def avg_iou(self) -> float: """float: Macro average IoU/Jaccard Index metric.""" return reduce(lambda acc, curr: curr.iou + acc, self._by_class_w_instances, .0) / len(self._by_class_w_instances) - @property + @cached_property def avg_iou_no_bkg(self) -> float: """float: Macro average IoU/Jaccard Index metric without `background` class (index 0).""" return reduce(lambda acc, curr: curr.iou + acc, self._by_class_w_instances[1:], .0) / (len(self._by_class_w_instances) - 1) @@ -508,17 +508,17 @@ def __init__(self, classification_metrics: BinaryClassificationMetrics, self._precision_recall_curve = self.__calculate_precision_recall_curve( predictions) if self.gt_count > 0 else [] - @property + @cached_property def gt_count(self) -> int: """int: Total count of GT bboxes.""" 
return self.TP + self.FN - @property + @cached_property def predicted_count(self) -> int: """int: Total count of predicted bboxes.""" return self.TP + self.FP - @property + @cached_property def iou(self) -> float: """float: IoU/Jaccard Index metric. @@ -526,7 +526,7 @@ def iou(self) -> float: """ return self._iou - @property + @cached_property def precision_recall_curve(self) -> List[Tuple[float, float]]: """List[Tuple[float, float]]: Precision x Recall curve as a list of (Recall, Precision) tuples.""" assert self.gt_count > 0, "This class does not have instances." @@ -641,7 +641,7 @@ def __init__(self, classes: List[str], self._by_class_w_instances = [ x for x in self.by_class if x.has_instances] - @property + @cached_property def avg_iou(self): """float: Macro average IoU/Jaccard Index metric. diff --git a/tests/evaluation/model/test_BinaryClassificationMetrics.py b/tests/evaluation/model/test_BinaryClassificationMetrics.py index 634cb9e..cf88d45 100644 --- a/tests/evaluation/model/test_BinaryClassificationMetrics.py +++ b/tests/evaluation/model/test_BinaryClassificationMetrics.py @@ -3,15 +3,14 @@ def test_count(): - bin_class = BinaryClassificationMetrics(cls=['a', 'b']) - bin_class.FN = 1 - bin_class.TP = 2 + bin_class_A = BinaryClassificationMetrics(cls=['a', 'b'], FN=1, TP=2) - assert bin_class.count == 3 + assert bin_class_A.count == 3 - bin_class.FP = 2 + bin_class_B = BinaryClassificationMetrics(cls=['a', 'b'], FN=1, TP=2, FP=2) - assert bin_class.count == 5 + + assert bin_class_B.count == 5 def test_accuracy(): @@ -21,70 +20,48 @@ def test_accuracy(): def test_recall(): - bin_class = BinaryClassificationMetrics(cls=['a', 'b']) - bin_class.TP = 2 - bin_class.FN = 6 + bin_class = BinaryClassificationMetrics(cls=['a', 'b'], TP=2, FN=6) assert bin_class.recall == 0.25 def test_recall_zero(): - bin_class = BinaryClassificationMetrics(cls=['a', 'b']) - bin_class.TP = 0 - bin_class.FN = 0 + bin_class = BinaryClassificationMetrics(cls=['a', 'b'], TP=0, FN=0) assert math.isnan(bin_class.recall) def test_fpr(): - bin_class = BinaryClassificationMetrics(cls=['a', 'b']) - bin_class.FP = 2 - bin_class.TN = 6 + bin_class = BinaryClassificationMetrics(cls=['a', 'b'], FP=2, TN=6) assert bin_class.false_positive_rate == 0.25 def test_specificity(): - bin_class = BinaryClassificationMetrics(cls=['a', 'b']) - bin_class.TN = 6 - bin_class.FP = 2 + bin_class = BinaryClassificationMetrics(cls=['a', 'b'], FP=2, TN=6) assert bin_class.specificity == 0.75 def test_specificity_zero(): - bin_class = BinaryClassificationMetrics(cls=['a', 'b']) - bin_class.TN = 0 - bin_class.FP = 0 + bin_class = BinaryClassificationMetrics(cls=['a', 'b'], FP=0, TN=0) assert math.isnan(bin_class.specificity) def test_precision(): - bin_class = BinaryClassificationMetrics(cls=['a', 'b']) - bin_class.TP = 8 - bin_class.FP = 2 + bin_class = BinaryClassificationMetrics(cls=['a', 'b'], TP=8, FP=2) assert bin_class.precision == 0.8 def test_precision_zero(): - bin_class = BinaryClassificationMetrics(cls=['a', 'b']) - bin_class.TP = 0 - bin_class.FP = 0 + bin_class = BinaryClassificationMetrics(cls=['a', 'b'], TP=0, FP=0) assert bin_class.precision == 1 def test_f_score(): - bin_class = BinaryClassificationMetrics(cls=['a', 'b']) - bin_class.FN = 4 - bin_class.TN = 2 - bin_class.TP = 5 - bin_class.FP = 6 + bin_class = BinaryClassificationMetrics(cls=['a', 'b'], FN=4, TN=2, TP=5, FP=6) assert bin_class.f_score == 0.5 def test_f_score_zero(): - bin_class = BinaryClassificationMetrics(cls=['a', 'b']) - bin_class.FN = 0 - bin_class.TN 
= 0 - bin_class.TP = 0 - bin_class.FP = 0 + bin_class = BinaryClassificationMetrics(cls=['a', 'b'], FN=0, TN=0, TP=0, FP=0) assert bin_class.f_score == 1 From 327bae9869ad6190214f835617e1e254b2dbbc14 Mon Sep 17 00:00:00 2001 From: "Joao G.A. Amorim" Date: Sat, 18 Dec 2021 19:17:01 -0300 Subject: [PATCH 2/9] fix division by zero at IoU --- lapixdl/evaluation/model.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lapixdl/evaluation/model.py b/lapixdl/evaluation/model.py index d36fbbd..e15f580 100644 --- a/lapixdl/evaluation/model.py +++ b/lapixdl/evaluation/model.py @@ -414,7 +414,10 @@ def __init__(self, classification_metrics: BinaryClassificationMetrics): @cached_property def iou(self) -> float: """float: IoU/Jaccard Index metric - TP / (FP + FN + TP).""" - return self.TP / (self.FP + self.FN + self.TP) + quotient = (self.FP + self.FN + self.TP) + if quotient == 0: + return math.nan + return self.TP / quotient def __str__(self): return ( From 09a4706c7839948ff93c1ac8ab6ae7ccb6840e48 Mon Sep 17 00:00:00 2001 From: "Joao G.A. Amorim" Date: Sat, 18 Dec 2021 19:34:40 -0300 Subject: [PATCH 3/9] add to_dict method at models --- lapixdl/evaluation/model.py | 89 +++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/lapixdl/evaluation/model.py b/lapixdl/evaluation/model.py index e15f580..d74acd8 100644 --- a/lapixdl/evaluation/model.py +++ b/lapixdl/evaluation/model.py @@ -261,6 +261,22 @@ def __str__(self): f'\tF-Score: {self.f_score}' ) + def to_dict(self): + if not self.has_instances: + return {} + else: + return { + 'TP': self.TP, + 'TN': self.TN, + 'FP': self.FP, + 'FN': self.FN, + 'FPR': self.false_positive_rate, + 'Accuracy': self.accuracy, + 'Recall': recall_string(self.recall), + 'Precision': self.precision, + 'Specificity': specificity_string(self.specificity), + 'F-Score': self.f_score, + } class ClassificationMetrics: """Multiclass classification metrics @@ -387,6 +403,20 @@ def __str__(self): ) + '\n'.join([cls_metrics.__str__() for cls_metrics in self.by_class]) + def to_dict(self): + return { + 'Sample Count': self.count, + 'Accuracy': self.accuracy, + 'Avg Recall': self.avg_recall, + 'Avg Precision': self.avg_precision, + 'Avg Specificity': self.avg_specificity, + 'Avg FPR': self.avg_false_positive_rate, + 'Avg F-Score': self.avg_f_score, + 'By Class': { + cls_metrics.cls: cls_metrics.to_dict() for cls_metrics in self.by_class + } + } + class BinarySegmentationMetrics(BinaryClassificationMetrics): """Binary pixel-based classification metrics @@ -434,6 +464,21 @@ def __str__(self): f'\tFPR: {self.false_positive_rate}\n' f'\tF-Score: {self.f_score}\n' ) + + def to_dict(self): + return { + 'TP': self.TP, + 'TN': self.TN, + 'FP': self.FP, + 'FN': self.FN, + 'IoU': self.iou, + 'Accuracy': self.accuracy, + 'Recall': recall_string(self.recall), + 'Precision': self.precision, + 'Specificity': specificity_string(self.specificity), + 'FPR': self.false_positive_rate, + 'F-Score': self.f_score + } class SegmentationMetrics(ClassificationMetrics): @@ -486,6 +531,22 @@ def __str__(self): f'By Class:\n\n' ) + '\n'.join(list([cls_metrics.__str__() for cls_metrics in self.by_class])) + + def to_dict(self): + return { + 'Pixel Count': self.count, + 'Accuracy': self.accuracy, + 'Avg Recall': self.avg_recall, + 'Avg Precision': self.avg_precision, + 'Avg Specificity': self.avg_specificity, + 'Avg F-Score': self.avg_f_score, + 'Avg FPR': self.avg_false_positive_rate, + 'Avg IoU': self.avg_iou, + 'Avg IoU w/o Background': self.avg_iou_no_bkg, + 'By 
Class': { + cls_metrics.cls: cls_metrics.to_dict() for cls_metrics in self.by_class + } + } class BinaryDetectionMetrics(BinaryClassificationMetrics): @@ -620,6 +681,21 @@ def __str__(self): f'\tAverage Precision: {self.average_precision()}\n' f'\t11-point Average Precision: {self.average_precision(11)}\n' ) + + def to_dict(self): + return { + 'TP': self.TP, + 'FP': self.FP, + 'FN': self.FN, + 'TN': math.nan, + 'IoU': self.iou, + 'Accuracy': self.accuracy, + 'Recall': recall_string(self.recall), + 'Precision': self.precision, + 'F-Score': self.f_score, + 'Average Precision': self.average_precision(), + '11-point Average Precision': self.average_precision(11) + } class DetectionMetrics(ClassificationMetrics): @@ -676,3 +752,16 @@ def __str__(self): f'By Class:\n\n' ) + '\n'.join(list([cls_metrics.__str__() for cls_metrics in self.by_class])) + + def to_dict(self): + return { + 'Bboxes Count':self.count, + 'Accuracy':self.accuracy, + 'Avg Recall':self.avg_recall, + 'Avg Precision':self.avg_precision, + 'Avg F-Score':self.avg_f_score, + 'Avg IoU':self.avg_iou, + 'By Class': { + cls_metrics.cls: cls_metrics.to_dict() for cls_metrics in self.by_class + } + } \ No newline at end of file From 35d0bb1f21cb78b80d28cac1ef3c0ffc0c7f2b92 Mon Sep 17 00:00:00 2001 From: "Joao G.A. Amorim" Date: Sat, 18 Dec 2021 20:20:46 -0300 Subject: [PATCH 4/9] remove unused imports --- tests/evaluation/evaluate/test_evaluate_detection.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/evaluation/evaluate/test_evaluate_detection.py b/tests/evaluation/evaluate/test_evaluate_detection.py index 7d63735..8af289b 100644 --- a/tests/evaluation/evaluate/test_evaluate_detection.py +++ b/tests/evaluation/evaluate/test_evaluate_detection.py @@ -1,9 +1,8 @@ import pytest -import numpy as np from lapixdl.evaluation.evaluate import evaluate_detection from lapixdl.evaluation.model import BBox -from ... import utils + def test_evaluation_detection_iou_metric(): From 3ddf55b530d519a0104e218828d7d52e27eed8c7 Mon Sep 17 00:00:00 2001 From: "Joao G.A. Amorim" Date: Sat, 18 Dec 2021 20:22:45 -0300 Subject: [PATCH 5/9] add pandas as requeriments --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 33ebbe8..007cabd 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ author='LAPiX', license='MIT', - install_requires=['numpy', 'tqdm', 'seaborn', 'pandas', 'matplotlib'], + install_requires=['numpy', 'pandas', 'tqdm', 'seaborn', 'pandas', 'matplotlib'], setup_requires=['pytest-runner'], tests_require=['pytest'], test_suite='tests', From 89e5c4807122d79102d25c2bf124bcdd686535b1 Mon Sep 17 00:00:00 2001 From: "Joao G.A. 
Amorim" Date: Sat, 18 Dec 2021 20:23:23 -0300 Subject: [PATCH 6/9] add to_dataframe into multicategorical models --- lapixdl/evaluation/model.py | 112 ++++++++++++++++++++++++------------ 1 file changed, 74 insertions(+), 38 deletions(-) diff --git a/lapixdl/evaluation/model.py b/lapixdl/evaluation/model.py index d74acd8..ce78cb2 100644 --- a/lapixdl/evaluation/model.py +++ b/lapixdl/evaluation/model.py @@ -1,11 +1,15 @@ from __future__ import annotations -from typing import Optional, Union, List, Tuple, Generic, TypeVar -from enum import Enum -from dataclasses import dataclass -from functools import cached_property, reduce + import math import numpy as np +from numpy.lib.function_base import average +import pandas as pd + +from typing import Optional, Union, List, Tuple, Generic, TypeVar +from enum import Enum +from dataclasses import dataclass +from functools import cached_property, lru_cache, reduce from . import plot @@ -261,7 +265,7 @@ def __str__(self): f'\tF-Score: {self.f_score}' ) - def to_dict(self): + def to_dict(self) -> dict: if not self.has_instances: return {} else: @@ -403,19 +407,30 @@ def __str__(self): ) + '\n'.join([cls_metrics.__str__() for cls_metrics in self.by_class]) - def to_dict(self): + + + def to_dict(self) -> dict: return { 'Sample Count': self.count, - 'Accuracy': self.accuracy, - 'Avg Recall': self.avg_recall, - 'Avg Precision': self.avg_precision, - 'Avg Specificity': self.avg_specificity, - 'Avg FPR': self.avg_false_positive_rate, - 'Avg F-Score': self.avg_f_score, - 'By Class': { - cls_metrics.cls: cls_metrics.to_dict() for cls_metrics in self.by_class - } + 'Average': { + 'Accuracy': self.accuracy, + 'Recall': self.avg_recall, + 'Precision': self.avg_precision, + 'Specificity': self.avg_specificity, + 'FPR': self.avg_false_positive_rate, + 'F-Score': self.avg_f_score + }, + 'By Class': dict_by_class(self.by_class) } + + def to_dataframe(self) -> pd.DataFrame: + as_dict = self.to_dict() + dict_to_df = { + 'Average': as_dict['Average'], + **as_dict['By Class'] + } + return pd.DataFrame(dict_to_df) + class BinarySegmentationMetrics(BinaryClassificationMetrics): @@ -465,7 +480,7 @@ def __str__(self): f'\tF-Score: {self.f_score}\n' ) - def to_dict(self): + def to_dict(self) -> dict: return { 'TP': self.TP, 'TN': self.TN, @@ -532,21 +547,29 @@ def __str__(self): ) + '\n'.join(list([cls_metrics.__str__() for cls_metrics in self.by_class])) - def to_dict(self): + def to_dict(self) -> dict: return { 'Pixel Count': self.count, - 'Accuracy': self.accuracy, - 'Avg Recall': self.avg_recall, - 'Avg Precision': self.avg_precision, - 'Avg Specificity': self.avg_specificity, - 'Avg F-Score': self.avg_f_score, - 'Avg FPR': self.avg_false_positive_rate, - 'Avg IoU': self.avg_iou, - 'Avg IoU w/o Background': self.avg_iou_no_bkg, - 'By Class': { - cls_metrics.cls: cls_metrics.to_dict() for cls_metrics in self.by_class - } + 'Average': { + 'Accuracy': self.accuracy, + 'Recall': self.avg_recall, + 'Precision': self.avg_precision, + 'Specificity': self.avg_specificity, + 'F-Score': self.avg_f_score, + 'FPR': self.avg_false_positive_rate, + 'IoU': self.avg_iou, + 'IoU w/o Background': self.avg_iou_no_bkg + }, + 'By Class': dict_by_class(self.by_class) } + + def to_dataframe(self) -> pd.DataFrame: + as_dict = self.to_dict() + dict_to_df = { + 'Average': as_dict['Average'], + **as_dict['By Class'] + } + return pd.DataFrame(dict_to_df) class BinaryDetectionMetrics(BinaryClassificationMetrics): @@ -682,7 +705,7 @@ def __str__(self): f'\t11-point Average Precision: 
{self.average_precision(11)}\n' ) - def to_dict(self): + def to_dict(self) -> dict: return { 'TP': self.TP, 'FP': self.FP, @@ -753,15 +776,28 @@ def __str__(self): ) + '\n'.join(list([cls_metrics.__str__() for cls_metrics in self.by_class])) - def to_dict(self): + def to_dict(self) -> dict: return { 'Bboxes Count':self.count, - 'Accuracy':self.accuracy, - 'Avg Recall':self.avg_recall, - 'Avg Precision':self.avg_precision, - 'Avg F-Score':self.avg_f_score, - 'Avg IoU':self.avg_iou, - 'By Class': { - cls_metrics.cls: cls_metrics.to_dict() for cls_metrics in self.by_class - } + 'Average': { + 'Accuracy':self.accuracy, + 'Recall':self.avg_recall, + 'Precision':self.avg_precision, + 'F-Score':self.avg_f_score, + 'IoU':self.avg_iou + }, + 'By Class': dict_by_class(self.by_class) + } + + def to_dataframe(self) -> pd.DataFrame: + as_dict = self.to_dict() + dict_to_df = { + 'Average': as_dict['Average'], + **as_dict['By Class'] + } + return pd.DataFrame(dict_to_df) + +def dict_by_class(by_class) -> dict: + return { + cls_metrics.cls: cls_metrics.to_dict() for cls_metrics in by_class } \ No newline at end of file From 5c4fa48f2e66dc7b5bf7018bc15ecc72591b9978 Mon Sep 17 00:00:00 2001 From: "Joao G.A. Amorim" Date: Sat, 18 Dec 2021 20:23:47 -0300 Subject: [PATCH 7/9] update print metrics with to_dataframe --- lapixdl/evaluation/evaluate.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lapixdl/evaluation/evaluate.py b/lapixdl/evaluation/evaluate.py index 316f31e..aba7174 100644 --- a/lapixdl/evaluation/evaluate.py +++ b/lapixdl/evaluation/evaluate.py @@ -52,7 +52,7 @@ def evaluate_segmentation(gt_masks: Iterable[Mask], metrics = SegmentationMetrics(classes, confusion_matrix) - print(metrics) + print(metrics.to_dataframe()) return metrics @@ -83,7 +83,7 @@ def evaluate_classification(gt_classifications: Iterable[Classification], metrics = ClassificationMetrics(classes, confusion_matrix) - print(metrics) + print(metrics.to_dataframe()) return metrics @@ -174,7 +174,7 @@ def evaluate_detection(gt_bboxes: Iterable[List[BBox]], cls_ious_sum / tot_images, predictions_by_class) - print(metrics) + print(metrics.to_dataframe()) return metrics From ca2b0c0e557bc18b5be9104e876decbfb457a386 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Gustavo=20A=2E=20Amorim?= Date: Thu, 23 Dec 2021 01:33:48 -0300 Subject: [PATCH 8/9] change cached_property --- lapixdl/evaluation/model.py | 108 +++++++++++++++++++++++------------- 1 file changed, 69 insertions(+), 39 deletions(-) diff --git a/lapixdl/evaluation/model.py b/lapixdl/evaluation/model.py index ce78cb2..7e45e33 100644 --- a/lapixdl/evaluation/model.py +++ b/lapixdl/evaluation/model.py @@ -9,7 +9,7 @@ from typing import Optional, Union, List, Tuple, Generic, TypeVar from enum import Enum from dataclasses import dataclass -from functools import cached_property, lru_cache, reduce +from functools import lru_cache, reduce from . 
import plot @@ -78,23 +78,27 @@ class BBox: height: int cls: int score: Optional[float] = None - - @cached_property + + @property + @lru_cache def upper_left_point(self) -> Tuple[int, int]: """Tuple[int, int]: (X,Y) of the upper left point of the Bounding Box.""" return (self.upper_left_x, self.upper_left_y) - @cached_property + @property + @lru_cache def bottom_right_point(self) -> Tuple[int, int]: """Tuple[int, int]: (X,Y) of the bottom right point of the Bounding Box.""" return (self.upper_left_x + self.width - 1, self.upper_left_y + self.height - 1) - @cached_property + @property + @lru_cache def center_point(self) -> Tuple[int, int]: """Tuple[int, int]: (X,Y) of the center point of the Bounding Box.""" return ((self.upper_left_x + self.width - 1) // 2, (self.upper_left_y + self.height - 1) // 2) - @cached_property + @property + @lru_cache def area(self) -> int: """int: Area of the Bounding Box.""" return self.width * self.height @@ -178,38 +182,44 @@ class BinaryClassificationMetrics: TN: int = 0 FN: int = 0 - @cached_property + @property + @lru_cache def has_instances(self) -> bool: """int: Indicates if the class has any ground truth or predicted instances.""" return self.count > 0 - @cached_property + @property + @lru_cache def count(self) -> int: """int: Total count of classified instances.""" return self.TP + self.TN + self.FP + self.FN - - @cached_property + + @property + @lru_cache def accuracy(self) -> float: """int: Total count of classified instances.""" if self.count == 0: return math.nan return (self.TP + self.TN)/self.count - @cached_property + @property + @lru_cache def recall(self) -> float: """float: Recall metric - TP / (TP + FN).""" if self.TP == 0 and self.FN == 0: return math.nan return self.TP/(self.TP + self.FN) - @cached_property + @property + @lru_cache def false_positive_rate(self) -> float: """float: False Positive Rate (FPR) metric - FP / (FP + TN).""" if self.FP == 0 and self.TN == 0: return math.nan return self.FP/(self.FP + self.TN) - - @cached_property + + @property + @lru_cache def specificity(self) -> float: """float: Specificity metric - TN / (FP + TN).""" if self.FP == 0 and self.TN == 0: @@ -217,7 +227,8 @@ def specificity(self) -> float: return self.TN/(self.FP + self.TN) - @cached_property + @property + @lru_cache def precision(self) -> float: """float: Precision metric - TP / (FP + TP).""" if self.FP == 0 and self.FN == 0: # No GT instances @@ -226,7 +237,8 @@ def precision(self) -> float: return math.nan return self.TP/(self.FP + self.TP) - @cached_property + @property + @lru_cache def f_score(self) -> float: """float: F-Score/Dice metric - 2*TP / (FP + FN + 2*TP).""" quotient = (self.FP + self.FN + 2*self.TP) @@ -236,7 +248,8 @@ def f_score(self) -> float: return math.nan return 2*self.TP/quotient - @cached_property + @property + @lru_cache def confusion_matrix(self) -> List[List[int]]: """List[List[int]]: Confusion matrix of all the classes""" return [[self.TP, self.FP], [self.FN, self.TN]] @@ -316,22 +329,26 @@ def by_class(self) -> List[BinaryClassificationMetrics]: """List[BinaryClassificationMetrics]: Binary metrics calculated for each class index.""" return self._by_class - @cached_property + @property + @lru_cache def by_class_w_instances(self) -> List[BinaryClassificationMetrics]: """List[BinaryClassificationMetrics]: Binary metrics calculated for each class index with instances.""" return self._by_class_w_instances - @cached_property + @property + @lru_cache def count(self) -> int: """int: Total count of classified instances.""" return 
self._count - @cached_property + @property + @lru_cache def accuracy(self) -> float: """float: Accuracy metric - correct classifications / count.""" return np.diagonal(self._confusion_matrix).sum() / self.count - @cached_property + @property + @lru_cache def avg_recall(self) -> float: """float: Macro average recall metric.""" by_class_w_recall = [ @@ -341,29 +358,34 @@ def avg_recall(self) -> float: return 1 return reduce(lambda acc, curr: curr.recall + acc, by_class_w_recall, .0) / len(by_class_w_recall) - @cached_property + @property + @lru_cache def avg_precision(self) -> float: """float: Macro average precision metric.""" return reduce(lambda acc, curr: (0 if math.isnan(curr.precision) else curr.precision) + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances) - - @cached_property + + @property + @lru_cache def avg_specificity(self) -> float: """float: Macro average specificity metric.""" by_class_w_specificity = [ c for c in self.by_class_w_instances if not math.isnan(c.specificity)] return reduce(lambda acc, curr: curr.specificity + acc, by_class_w_specificity, .0) / len(by_class_w_specificity) - - @cached_property + + @property + @lru_cache def avg_f_score(self) -> float: """float: Macro average F-Score/Dice metric.""" return reduce(lambda acc, curr: curr.f_score + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances) - @cached_property + @property + @lru_cache def avg_false_positive_rate(self) -> float: """float: Macro average False Positive Rate metric.""" return reduce(lambda acc, curr: curr.false_positive_rate + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances) - @cached_property + @property + @lru_cache def confusion_matrix(self) -> List[List[int]]: """List[List[int]]: Confusion matrix of all the classes""" return self._confusion_matrix @@ -456,7 +478,8 @@ def __init__(self, classification_metrics: BinaryClassificationMetrics): FN=classification_metrics.FN ) - @cached_property + @property + @lru_cache def iou(self) -> float: """float: IoU/Jaccard Index metric - TP / (FP + FN + TP).""" quotient = (self.FP + self.FN + self.TP) @@ -521,12 +544,14 @@ def __init__(self, classes: List[str], confusion_matrix: List[List[int]] = []): self._by_class_w_instances = [ x for x in self.by_class if x.has_instances] - @cached_property + @property + @lru_cache def avg_iou(self) -> float: """float: Macro average IoU/Jaccard Index metric.""" return reduce(lambda acc, curr: curr.iou + acc, self._by_class_w_instances, .0) / len(self._by_class_w_instances) - @cached_property + @property + @lru_cache def avg_iou_no_bkg(self) -> float: """float: Macro average IoU/Jaccard Index metric without `background` class (index 0).""" return reduce(lambda acc, curr: curr.iou + acc, self._by_class_w_instances[1:], .0) / (len(self._by_class_w_instances) - 1) @@ -594,18 +619,21 @@ def __init__(self, classification_metrics: BinaryClassificationMetrics, self._iou = iou self._precision_recall_curve = self.__calculate_precision_recall_curve( predictions) if self.gt_count > 0 else [] - - @cached_property + + property + @lru_cache def gt_count(self) -> int: """int: Total count of GT bboxes.""" return self.TP + self.FN - - @cached_property + + @property + @lru_cache def predicted_count(self) -> int: """int: Total count of predicted bboxes.""" return self.TP + self.FP - @cached_property + @property + @lru_cache def iou(self) -> float: """float: IoU/Jaccard Index metric. 
@@ -613,7 +641,8 @@ def iou(self) -> float: """ return self._iou - @cached_property + @property + @lru_cache def precision_recall_curve(self) -> List[Tuple[float, float]]: """List[Tuple[float, float]]: Precision x Recall curve as a list of (Recall, Precision) tuples.""" assert self.gt_count > 0, "This class does not have instances." @@ -743,7 +772,8 @@ def __init__(self, classes: List[str], self._by_class_w_instances = [ x for x in self.by_class if x.has_instances] - @cached_property + @property + @lru_cache def avg_iou(self): """float: Macro average IoU/Jaccard Index metric. @@ -800,4 +830,4 @@ def to_dataframe(self) -> pd.DataFrame: def dict_by_class(by_class) -> dict: return { cls_metrics.cls: cls_metrics.to_dict() for cls_metrics in by_class - } \ No newline at end of file + } From c4646d1dd78d196cfff928073f36202f4982d31f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Gustavo=20A=2E=20Amorim?= Date: Thu, 23 Dec 2021 01:41:46 -0300 Subject: [PATCH 9/9] add initialization to lru_cache --- lapixdl/evaluation/model.py | 60 ++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/lapixdl/evaluation/model.py b/lapixdl/evaluation/model.py index 7e45e33..f118248 100644 --- a/lapixdl/evaluation/model.py +++ b/lapixdl/evaluation/model.py @@ -80,25 +80,25 @@ class BBox: score: Optional[float] = None @property - @lru_cache + @lru_cache() def upper_left_point(self) -> Tuple[int, int]: """Tuple[int, int]: (X,Y) of the upper left point of the Bounding Box.""" return (self.upper_left_x, self.upper_left_y) @property - @lru_cache + @lru_cache() def bottom_right_point(self) -> Tuple[int, int]: """Tuple[int, int]: (X,Y) of the bottom right point of the Bounding Box.""" return (self.upper_left_x + self.width - 1, self.upper_left_y + self.height - 1) @property - @lru_cache + @lru_cache() def center_point(self) -> Tuple[int, int]: """Tuple[int, int]: (X,Y) of the center point of the Bounding Box.""" return ((self.upper_left_x + self.width - 1) // 2, (self.upper_left_y + self.height - 1) // 2) @property - @lru_cache + @lru_cache() def area(self) -> int: """int: Area of the Bounding Box.""" return self.width * self.height @@ -183,19 +183,19 @@ class BinaryClassificationMetrics: FN: int = 0 @property - @lru_cache + @lru_cache() def has_instances(self) -> bool: """int: Indicates if the class has any ground truth or predicted instances.""" return self.count > 0 @property - @lru_cache + @lru_cache() def count(self) -> int: """int: Total count of classified instances.""" return self.TP + self.TN + self.FP + self.FN @property - @lru_cache + @lru_cache() def accuracy(self) -> float: """int: Total count of classified instances.""" if self.count == 0: @@ -203,7 +203,7 @@ def accuracy(self) -> float: return (self.TP + self.TN)/self.count @property - @lru_cache + @lru_cache() def recall(self) -> float: """float: Recall metric - TP / (TP + FN).""" if self.TP == 0 and self.FN == 0: @@ -211,7 +211,7 @@ def recall(self) -> float: return self.TP/(self.TP + self.FN) @property - @lru_cache + @lru_cache() def false_positive_rate(self) -> float: """float: False Positive Rate (FPR) metric - FP / (FP + TN).""" if self.FP == 0 and self.TN == 0: @@ -219,7 +219,7 @@ def false_positive_rate(self) -> float: return self.FP/(self.FP + self.TN) @property - @lru_cache + @lru_cache() def specificity(self) -> float: """float: Specificity metric - TN / (FP + TN).""" if self.FP == 0 and self.TN == 0: @@ -228,7 +228,7 @@ def specificity(self) -> float: return self.TN/(self.FP + self.TN) @property 
- @lru_cache + @lru_cache() def precision(self) -> float: """float: Precision metric - TP / (FP + TP).""" if self.FP == 0 and self.FN == 0: # No GT instances @@ -238,7 +238,7 @@ def precision(self) -> float: return self.TP/(self.FP + self.TP) @property - @lru_cache + @lru_cache() def f_score(self) -> float: """float: F-Score/Dice metric - 2*TP / (FP + FN + 2*TP).""" quotient = (self.FP + self.FN + 2*self.TP) @@ -249,7 +249,7 @@ def f_score(self) -> float: return 2*self.TP/quotient @property - @lru_cache + @lru_cache() def confusion_matrix(self) -> List[List[int]]: """List[List[int]]: Confusion matrix of all the classes""" return [[self.TP, self.FP], [self.FN, self.TN]] @@ -330,25 +330,25 @@ def by_class(self) -> List[BinaryClassificationMetrics]: return self._by_class @property - @lru_cache + @lru_cache() def by_class_w_instances(self) -> List[BinaryClassificationMetrics]: """List[BinaryClassificationMetrics]: Binary metrics calculated for each class index with instances.""" return self._by_class_w_instances @property - @lru_cache + @lru_cache() def count(self) -> int: """int: Total count of classified instances.""" return self._count @property - @lru_cache + @lru_cache() def accuracy(self) -> float: """float: Accuracy metric - correct classifications / count.""" return np.diagonal(self._confusion_matrix).sum() / self.count @property - @lru_cache + @lru_cache() def avg_recall(self) -> float: """float: Macro average recall metric.""" by_class_w_recall = [ @@ -359,13 +359,13 @@ def avg_recall(self) -> float: return reduce(lambda acc, curr: curr.recall + acc, by_class_w_recall, .0) / len(by_class_w_recall) @property - @lru_cache + @lru_cache() def avg_precision(self) -> float: """float: Macro average precision metric.""" return reduce(lambda acc, curr: (0 if math.isnan(curr.precision) else curr.precision) + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances) @property - @lru_cache + @lru_cache() def avg_specificity(self) -> float: """float: Macro average specificity metric.""" by_class_w_specificity = [ @@ -373,19 +373,19 @@ def avg_specificity(self) -> float: return reduce(lambda acc, curr: curr.specificity + acc, by_class_w_specificity, .0) / len(by_class_w_specificity) @property - @lru_cache + @lru_cache() def avg_f_score(self) -> float: """float: Macro average F-Score/Dice metric.""" return reduce(lambda acc, curr: curr.f_score + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances) @property - @lru_cache + @lru_cache() def avg_false_positive_rate(self) -> float: """float: Macro average False Positive Rate metric.""" return reduce(lambda acc, curr: curr.false_positive_rate + acc, self.by_class_w_instances, .0) / len(self.by_class_w_instances) @property - @lru_cache + @lru_cache() def confusion_matrix(self) -> List[List[int]]: """List[List[int]]: Confusion matrix of all the classes""" return self._confusion_matrix @@ -479,7 +479,7 @@ def __init__(self, classification_metrics: BinaryClassificationMetrics): ) @property - @lru_cache + @lru_cache() def iou(self) -> float: """float: IoU/Jaccard Index metric - TP / (FP + FN + TP).""" quotient = (self.FP + self.FN + self.TP) @@ -545,13 +545,13 @@ def __init__(self, classes: List[str], confusion_matrix: List[List[int]] = []): x for x in self.by_class if x.has_instances] @property - @lru_cache + @lru_cache() def avg_iou(self) -> float: """float: Macro average IoU/Jaccard Index metric.""" return reduce(lambda acc, curr: curr.iou + acc, self._by_class_w_instances, .0) / len(self._by_class_w_instances) @property - 
@lru_cache + @lru_cache() def avg_iou_no_bkg(self) -> float: """float: Macro average IoU/Jaccard Index metric without `background` class (index 0).""" return reduce(lambda acc, curr: curr.iou + acc, self._by_class_w_instances[1:], .0) / (len(self._by_class_w_instances) - 1) @@ -621,19 +621,19 @@ def __init__(self, classification_metrics: BinaryClassificationMetrics, predictions) if self.gt_count > 0 else [] property - @lru_cache + @lru_cache() def gt_count(self) -> int: """int: Total count of GT bboxes.""" return self.TP + self.FN @property - @lru_cache + @lru_cache() def predicted_count(self) -> int: """int: Total count of predicted bboxes.""" return self.TP + self.FP @property - @lru_cache + @lru_cache() def iou(self) -> float: """float: IoU/Jaccard Index metric. @@ -642,7 +642,7 @@ def iou(self) -> float: return self._iou @property - @lru_cache + @lru_cache() def precision_recall_curve(self) -> List[Tuple[float, float]]: """List[Tuple[float, float]]: Precision x Recall curve as a list of (Recall, Precision) tuples.""" assert self.gt_count > 0, "This class does not have instances." @@ -773,7 +773,7 @@ def __init__(self, classes: List[str], x for x in self.by_class if x.has_instances] @property - @lru_cache + @lru_cache() def avg_iou(self): """float: Macro average IoU/Jaccard Index metric.
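
Note on the caching approach used across these patches: patch 1 switches the metric properties to functools.cached_property, patch 8 replaces that with @property stacked on @lru_cache, and patch 9 adds the call parentheses (@lru_cache()), presumably because the bare-decorator form of lru_cache and cached_property itself both require Python 3.8+. Below is a minimal, self-contained sketch of the resulting pattern combined with the NaN guard from patch 2 and a flat to_dict in the spirit of patch 3; the ToyBinaryMetrics class is hypothetical and not part of lapixdl, and it is declared frozen=True only because lru_cache hashes `self`, which a default (mutable, eq=True) dataclass does not support.

    import math
    from dataclasses import dataclass
    from functools import lru_cache


    @dataclass(frozen=True)
    class ToyBinaryMetrics:
        TP: int = 0
        FP: int = 0
        FN: int = 0
        TN: int = 0

        @property
        @lru_cache()  # parentheses keep this valid on Python < 3.8
        def iou(self) -> float:
            # Guard the division the way patch 2 does: no instances -> NaN.
            denominator = self.TP + self.FP + self.FN
            if denominator == 0:
                return math.nan
            return self.TP / denominator

        def to_dict(self) -> dict:
            # Flat mapping, analogous to the to_dict methods added in patch 3.
            return {'TP': self.TP, 'FP': self.FP, 'FN': self.FN,
                    'TN': self.TN, 'IoU': self.iou}


    m = ToyBinaryMetrics(TP=3, FP=1, FN=2)
    print(m.iou)        # 0.5, computed once and then served from the cache
    print(m.to_dict())

Because lru_cache keys the cache on the instance, a metrics object returns the value computed on first access rather than recomputing from later field values; this is consistent with the reworked tests in patch 1, which construct a fresh BinaryClassificationMetrics per scenario instead of mutating one object between assertions.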