fix: Improve anomaly threshold usage in inference task (#67)
* fix: Use training threshold to generate reports instead of 0 for anomaly test with no gt

* feat: Use image training threshold automatically if no labels are provided for testing

Allow defining training threshold type from config

* docs: Update anomaly test documentation

* build: Bump version 1.2.5 -> 1.2.6

* docs: Update changelog

Approved By: @rcmalli
lorenzomammana authored Oct 2, 2023
1 parent 9596efa commit 786cca5
Showing 6 changed files with 34 additions and 8 deletions.
9 changes: 9 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,15 @@
# Changelog
All notable changes to this project will be documented in this file.

### [1.2.6]

#### Added

- Add optional `training_threshold_type` for anomaly detection inference task.

#### Changed

- Compute results using the training image threshold instead of zero when running anomaly inference with no labelled data.

### [1.2.5]

#### Fixed
2 changes: 2 additions & 0 deletions docs/tutorials/examples/anomaly_detection.md
@@ -290,9 +290,11 @@ datamodule:
task:
  model_path: ???
  use_training_threshold: false
  training_threshold_type: image
```

By default, the inference task recomputes the threshold on the test data to maximize the F1-score. If you want to use the threshold from the training phase instead, set the `use_training_threshold` parameter to true.
The `training_threshold_type` parameter specifies which training threshold to use: it can be either `image` or `pixel`. If it is not specified, the `image` threshold is used.
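
For illustration, here is a minimal sketch of how these options reach the evaluation task when it is instantiated directly from Python; the constructor arguments mirror `AnomalibEvaluation.__init__` in `quadra/tasks/anomaly.py`, while the config and model paths are hypothetical placeholders (in a normal run Hydra builds the task from the `task:` section above).

```python
# Hypothetical direct use of the evaluation task; in practice Hydra
# instantiates it from the `task:` section of the experiment config.
from omegaconf import OmegaConf

from quadra.tasks.anomaly import AnomalibEvaluation

config = OmegaConf.load("my_inference_experiment.yaml")  # placeholder config path
task = AnomalibEvaluation(
    config=config,
    model_path="outputs/deployment_model/model.onnx",  # placeholder exported model
    use_training_threshold=True,
    training_threshold_type="pixel",  # "image" is used when this is omitted
)
task.prepare()  # set up the exported model and data for evaluation
task.test()     # compute metrics using the selected training threshold
```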

The model path is the path to an exported model. At the moment `torchscript` and `onnx` models are supported (exported automatically after a training experiment); only the `CFLOW` model is currently not supported for inference, as it is not compatible with either torchscript or onnx.

4 changes: 2 additions & 2 deletions pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "quadra"
version = "1.2.5"
version = "1.2.6"
description = "Deep Learning experiment orchestration library"
authors = [
{ name = "Alessandro Polidori", email = "alessandro.polidori@orobix.com" },
@@ -118,7 +118,7 @@ repository = "https://github.com/orobix/quadra"

# Adapted from https://realpython.com/pypi-publish-python-package/#version-your-package
[tool.bumpver]
current_version = "1.2.5"
current_version = "1.2.6"
version_pattern = "MAJOR.MINOR.PATCH"
commit_message = "build: Bump version {old_version} -> {new_version}"
commit = true
2 changes: 1 addition & 1 deletion quadra/__init__.py
@@ -1,4 +1,4 @@
__version__ = "1.2.5"
__version__ = "1.2.6"


def get_version():
1 change: 1 addition & 0 deletions quadra/configs/experiment/base/anomaly/inference.yaml
@@ -18,3 +18,4 @@ datamodule:
task:
  model_path: ???
  use_training_threshold: false
  training_threshold_type: image
24 changes: 19 additions & 5 deletions quadra/tasks/anomaly.py
@@ -2,7 +2,7 @@
import json
import os
from collections import Counter
from typing import Dict, Generic, List, Optional, TypeVar, Union, cast
from typing import Dict, Generic, List, Literal, Optional, TypeVar, Union, cast

import cv2
import hydra
@@ -297,12 +297,26 @@ class AnomalibEvaluation(Evaluation[AnomalyDataModule]):
"""

def __init__(
self, config: DictConfig, model_path: str, use_training_threshold: bool = False, device: Optional[str] = None
self,
config: DictConfig,
model_path: str,
use_training_threshold: bool = False,
device: Optional[str] = None,
training_threshold_type: Optional[Literal["image", "pixel"]] = None,
):
super().__init__(config=config, model_path=model_path, device=device)

self.use_training_threshold = use_training_threshold

if training_threshold_type is not None and training_threshold_type not in ["image", "pixel"]:
raise ValueError("Training threshold type must be either image or pixel")

if training_threshold_type is None and use_training_threshold:
log.warning("Using training threshold but no training threshold type is provided, defaulting to image")
training_threshold_type = "image"

self.training_threshold_type = training_threshold_type

def prepare(self) -> None:
"""Prepare the evaluation."""
super().prepare()
Expand Down Expand Up @@ -349,7 +363,7 @@ def test(self) -> None:
if any(x != -1 for x in image_labels):
if self.use_training_threshold:
_image_labels = torch.tensor(image_labels)
threshold = torch.tensor(float(self.model_data["image_threshold"]))
threshold = torch.tensor(float(self.model_data[f"{self.training_threshold_type}_threshold"]))
known_labels = torch.where(_image_labels != -1)[0]

_image_labels = _image_labels[known_labels]
@@ -362,9 +376,9 @@
optimal_f1_score = optimal_f1.compute()
threshold = optimal_f1.threshold
else:
log.warning("No ground truth available during evaluation, F1 score and threshold set to 0")
log.warning("No ground truth available during evaluation, use training image threshold for reporting")
optimal_f1_score = torch.tensor(0)
threshold = torch.tensor(0)
threshold = torch.tensor(float(self.model_data["image_threshold"]))

log.info("Computed F1 score: %s", optimal_f1_score.item())
self.metadata["anomaly_scores"] = anomaly_scores
