
Commit 8b52e33

Merge branch 'release/v2.0.0' into ci/add-new-gh-actions
2 parents eba3c5b + 7116fec commit 8b52e33

File tree

28 files changed: +384 -103 lines changed


CHANGELOG.md

Lines changed: 41 additions & 5 deletions
@@ -8,13 +8,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 
 ### Added
 
-- Add `AUPIMO` tutorials notebooks in https://github.com/openvinotoolkit/anomalib/pull/2330 and https://github.com/openvinotoolkit/anomalib/pull/2336
-- Add `AUPIMO` metric by [jpcbertoldo](https://github.com/jpcbertoldo) in https://github.com/openvinotoolkit/anomalib/pull/1726 and refactored by [ashwinvaidya17](https://github.com/ashwinvaidya17) in https://github.com/openvinotoolkit/anomalib/pull/2329
-
 ### Removed
 
-- Remove `RKDE` in https://github.com/openvinotoolkit/anomalib/pull/2455
-
 ### Changed
 
 ### Deprecated
@@ -23,6 +18,47 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 
 ### New Contributors
 
+## v2.0.0
+
+### Added
+
+- 🚀 Add `Dataclasses` and `PostProcessor` by @djdameln in https://github.com/openvinotoolkit/anomalib/pull/2098
+- 🚀 Add dataclass validators by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2307
+- 🚀 Add Customisable Image Visualizer by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2334
+- 🚀 Metrics redesign by @djdameln in https://github.com/openvinotoolkit/anomalib/pull/2326
+- 🚀 Add `PreProcessor` to `AnomalyModule` by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2358
+- 🚀 Add Multi-GPU Training Support by @ashwinvaidya17 in https://github.com/openvinotoolkit/anomalib/pull/2435
+- 🔨 Refactor: Add missing auxiliary attributes to `AnomalibModule` by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2460
+- 🔨 Rename `AnomalyModule` to `AnomalibModule` by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2423
+
+- 🚀 Add `AUPIMO` metric by [jpcbertoldo](https://github.com/jpcbertoldo) in https://github.com/openvinotoolkit/anomalib/pull/1726 and refactored by [ashwinvaidya17](https://github.com/ashwinvaidya17) in https://github.com/openvinotoolkit/anomalib/pull/2329
+- 📚 Add `AUPIMO` tutorials notebooks in https://github.com/openvinotoolkit/anomalib/pull/2330 and https://github.com/openvinotoolkit/anomalib/pull/2336
+
+### Removed
+
+- 🗑️ Remove RKDE by @ashwinvaidya17 in https://github.com/openvinotoolkit/anomalib/pull/2455
+- 🗑️ Remove rich methods by @ashwinvaidya17 in https://github.com/openvinotoolkit/anomalib/pull/2283
+- 🔨 Replace `imgaug` with Native PyTorch Transforms by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2436
+- 🗑️ Remove task type by @djdameln in https://github.com/openvinotoolkit/anomalib/pull/2450
+
+### Changed
+
+- Refactor Lightning's `trainer.model` to `trainer.lightning_module` by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2255
+- Update open-clip-torch requirement from <2.26.1,>=2.23.0 to >=2.23.0,<2.26.2 by @dependabot in https://github.com/openvinotoolkit/anomalib/pull/2189
+- Update sphinx requirement by @dependabot in https://github.com/openvinotoolkit/anomalib/pull/2235
+- Update ruff configuration by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2269
+- Revert "Update open-clip-torch requirement from <2.26.1,>=2.23.0 to >=2.23.0,<2.26.2" by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2270
+- 🔨 Refactor BaseThreshold to Threshold by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2278
+- 🔨 Enable Ruff Rules: PLW1514 and PLR6201 by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2284
+- 🔨 Update nncf export by @ashwinvaidya17 in https://github.com/openvinotoolkit/anomalib/pull/2286
+- 🔨 Linting: Enable `PLR6301`, # could be a function, class method or static method by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2288
+- 🔨 Restructure unit tests and fix ruff issues by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2306
+- 🔨 Update Ruff Config - Add Missing Copyright Headers by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/2281
+- 🔨 optimization/quantization added into 500 series by @paularamo in https://github.com/openvinotoolkit/anomalib/pull/2197
+
+### Fixed
+
+- 🐞 Replace package_available with module_available by @harimkang in https://github.com/openvinotoolkit/anomalib/pull/2407
+
 ## [v1.2.0]
 
 ### Added
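
Several of the v2.0.0 entries above change how a run is assembled rather than a single call. As a quick orientation, here is a minimal sketch of a training run that exercises the Multi-GPU support from PR 2435. This sketch is not part of the commit: the `MVTec` datamodule name and the assumption that `Engine` forwards `accelerator`/`devices` to the Lightning `Trainer` it builds come from the wider anomalib API, not from this diff.

# Minimal sketch (not from this commit) of a multi-GPU run with anomalib v2.0.0.
# Assumptions: `MVTec` is importable from anomalib.data, and Engine forwards
# `accelerator`/`devices` to the Lightning Trainer it creates internally.
from anomalib.data import MVTec
from anomalib.engine import Engine
from anomalib.models import Padim

datamodule = MVTec(category="bottle")  # any anomalib datamodule would do here
model = Padim()                        # auxiliary components resolve to their defaults

engine = Engine(accelerator="gpu", devices=2)  # multi-GPU training (PR 2435)
engine.fit(model=model, datamodule=datamodule)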

src/anomalib/engine/engine.py

Lines changed: 2 additions & 15 deletions
@@ -22,7 +22,6 @@
 from anomalib.deploy import CompressionType, ExportType
 from anomalib.models import AnomalibModule
 from anomalib.utils.path import create_versioned_dir
-from anomalib.visualization import ImageVisualizer
 
 logger = logging.getLogger(__name__)
 
@@ -258,13 +257,13 @@ def _setup_trainer(self, model: AnomalibModule) -> None:
         self._cache.update(model)
 
         # Setup anomalib callbacks to be used with the trainer
-        self._setup_anomalib_callbacks(model)
+        self._setup_anomalib_callbacks()
 
         # Instantiate the trainer if it is not already instantiated
         if self._trainer is None:
             self._trainer = Trainer(**self._cache.args)
 
-    def _setup_anomalib_callbacks(self, model: AnomalibModule) -> None:
+    def _setup_anomalib_callbacks(self) -> None:
         """Set up callbacks for the trainer."""
         _callbacks: list[Callback] = []
 
@@ -279,18 +278,6 @@ def _setup_anomalib_callbacks(self, model: AnomalibModule) -> None:
             ),
         )
 
-        # Add the post-processor callback.
-        if isinstance(model.post_processor, Callback):
-            _callbacks.append(model.post_processor)
-
-        # Add the metrics callback.
-        if isinstance(model.evaluator, Callback):
-            _callbacks.append(model.evaluator)
-
-        # Add the image visualizer callback if it is passed by the user.
-        if not any(isinstance(callback, ImageVisualizer) for callback in self._cache.args["callbacks"]):
-            _callbacks.append(ImageVisualizer())
-
         _callbacks.append(TimerCallback())
 
         # Combine the callbacks, and update the trainer callbacks.
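
The wiring removed here is not lost: after this change the post-processor, evaluator, and visualizer reach the Trainer through the model itself, via Lightning's `configure_callbacks()` hook (see the `anomalib_module.py` diff below), so the Engine only registers its own callbacks such as `TimerCallback`. A rough sketch of the mechanism with plain Lightning and hypothetical class names, to illustrate why the Engine no longer needs to inspect the model:

# Rough sketch of the new callback flow (hypothetical classes, not the
# actual anomalib implementation).
import lightning.pytorch as pl
from lightning.pytorch import Callback


class PostProcessorCallback(Callback):
    """Stand-in for a component that is both a model attribute and a callback."""


class SketchModule(pl.LightningModule):
    def __init__(self) -> None:
        super().__init__()
        self.post_processor = PostProcessorCallback()

    def configure_callbacks(self) -> list[Callback]:
        # Lightning calls this hook when the Trainer is set up and appends the
        # returned callbacks to its callback list, so no engine-side wiring is needed.
        return [self.post_processor]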

src/anomalib/models/components/base/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@
 # Copyright (C) 2022-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-from .anomaly_module import AnomalibModule
+from .anomalib_module import AnomalibModule
 from .buffer_list import BufferListMixin
 from .dynamic_buffer import DynamicBufferMixin
 from .memory_bank_module import MemoryBankMixin
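
Only the internal module file changes here, so the public import path is unaffected. A quick check, assuming the class is defined in the renamed module as this diff shows:

# The re-export keeps the public import path stable after the file rename.
from anomalib.models.components.base import AnomalibModule

# The class itself now lives in anomalib_module.py rather than anomaly_module.py.
assert AnomalibModule.__module__ == "anomalib.models.components.base.anomalib_module"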

src/anomalib/models/components/base/anomaly_module.py renamed to src/anomalib/models/components/base/anomalib_module.py

Lines changed: 137 additions & 27 deletions
@@ -25,6 +25,7 @@
 from anomalib.metrics.threshold import Threshold
 from anomalib.post_processing import OneClassPostProcessor, PostProcessor
 from anomalib.pre_processing import PreProcessor
+from anomalib.visualization import ImageVisualizer, Visualizer
 
 from .export_mixin import ExportMixin
 
@@ -40,8 +41,9 @@ class AnomalibModule(ExportMixin, pl.LightningModule, ABC):
     def __init__(
         self,
         pre_processor: PreProcessor | bool = True,
-        post_processor: PostProcessor | None = None,
+        post_processor: PostProcessor | bool = True,
         evaluator: Evaluator | bool = True,
+        visualizer: Visualizer | bool = True,
     ) -> None:
         super().__init__()
         logger.info("Initializing %s model.", self.__class__.__name__)
@@ -52,11 +54,12 @@ def __init__(
         self.callbacks: list[Callback]
 
         self.pre_processor = self._resolve_pre_processor(pre_processor)
-        self.post_processor = post_processor or self.default_post_processor()
+        self.post_processor = self._resolve_post_processor(post_processor)
         self.evaluator = self._resolve_evaluator(evaluator)
+        self.visualizer = self._resolve_visualizer(visualizer)
 
         self._input_size: tuple[int, int] | None = None
-        self._is_setup = False  # flag to track if setup has been called from the trainer
+        self._is_setup = False
 
     @property
     def name(self) -> str:
@@ -79,28 +82,20 @@ def _setup(self) -> None:
         initialization.
         """
 
-    def _resolve_pre_processor(self, pre_processor: PreProcessor | bool) -> PreProcessor | None:
-        """Resolve and validate which pre-processor to use..
-
-        Args:
-            pre_processor: Pre-processor configuration
-                - True -> use default pre-processor
-                - False -> no pre-processor
-                - PreProcessor -> use the provided pre-processor
+    def configure_callbacks(self) -> Sequence[Callback] | Callback:
+        """Configure default callbacks for AnomalibModule.
 
         Returns:
-            Configured pre-processor
+            List of callbacks that includes the pre-processor, post-processor, evaluator,
+            and visualizer if they are available and inherit from Callback.
         """
-        if isinstance(pre_processor, PreProcessor):
-            return pre_processor
-        if isinstance(pre_processor, bool):
-            return self.configure_pre_processor() if pre_processor else None
-        msg = f"Invalid pre-processor type: {type(pre_processor)}"
-        raise TypeError(msg)
-
-    def configure_callbacks(self) -> Sequence[Callback] | Callback:
-        """Configure default callbacks for AnomalibModule."""
-        return [self.pre_processor] if self.pre_processor else []
+        callbacks: list[Callback] = []
+        callbacks.extend(
+            component
+            for component in (self.pre_processor, self.post_processor, self.evaluator, self.visualizer)
+            if isinstance(component, Callback)
+        )
+        return callbacks
 
     def forward(self, batch: torch.Tensor, *args, **kwargs) -> InferenceBatch:
         """Perform the forward-pass by passing input tensor to the module.
@@ -170,6 +165,25 @@ def learning_type(self) -> LearningType:
         """Learning type of the model."""
         raise NotImplementedError
 
+    def _resolve_pre_processor(self, pre_processor: PreProcessor | bool) -> PreProcessor | None:
+        """Resolve and validate which pre-processor to use..
+
+        Args:
+            pre_processor: Pre-processor configuration
+                - True -> use default pre-processor
+                - False -> no pre-processor
+                - PreProcessor -> use the provided pre-processor
+
+        Returns:
+            Configured pre-processor
+        """
+        if isinstance(pre_processor, PreProcessor):
+            return pre_processor
+        if isinstance(pre_processor, bool):
+            return self.configure_pre_processor() if pre_processor else None
+        msg = f"Invalid pre-processor type: {type(pre_processor)}"
+        raise TypeError(msg)
+
     @classmethod
     def configure_pre_processor(cls, image_size: tuple[int, int] | None = None) -> PreProcessor:
         """Configure the pre-processor.
@@ -214,15 +228,54 @@ def configure_pre_processor(cls, image_size: tuple[int, int] | None = None) -> P
             ]),
         )
 
-    def default_post_processor(self) -> PostProcessor | None:
-        """Default post processor.
+    def _resolve_post_processor(self, post_processor: PostProcessor | bool) -> PostProcessor | None:
+        """Resolve and validate which post-processor to use.
 
-        Override in subclass for model-specific post-processing behaviour.
+        Args:
+            post_processor: Post-processor configuration
+                - True -> use default post-processor
+                - False -> no post-processor
+                - PostProcessor -> use the provided post-processor
+
+        Returns:
+            Configured post-processor
+        """
+        if isinstance(post_processor, PostProcessor):
+            return post_processor
+        if isinstance(post_processor, bool):
+            return self.configure_post_processor() if post_processor else None
+        msg = f"Invalid post-processor type: {type(post_processor)}"
+        raise TypeError(msg)
+
+    def configure_post_processor(self) -> PostProcessor | None:
+        """Configure the default post-processor based on the learning type.
+
+        Returns:
+            PostProcessor: Configured post-processor instance.
+
+        Raises:
+            NotImplementedError: If no default post-processor is available for the model's learning type.
+
+        Examples:
+            Get default post-processor:
+
+            >>> post_processor = AnomalibModule.configure_post_processor()
+
+            Create model with custom post-processor:
+
+            >>> custom_post_processor = CustomPostProcessor()
+            >>> model = PatchCore(post_processor=custom_post_processor)
+
+            Disable post-processing:
+
+            >>> model = PatchCore(post_processor=False)
         """
         if self.learning_type == LearningType.ONE_CLASS:
             return OneClassPostProcessor()
-        msg = f"No default post-processor available for model {self.__name__} with learning type {self.learning_type}. \
-            Please override the default_post_processor method in the model implementation."
+        msg = (
+            f"No default post-processor available for model with learning type {self.learning_type}. "
+            "Please override the configure_post_processor method in the model implementation."
+        )
         raise NotImplementedError(msg)
 
     def _resolve_evaluator(self, evaluator: Evaluator | bool) -> Evaluator | None:
@@ -251,6 +304,63 @@ def configure_evaluator() -> Evaluator:
         test_metrics = [image_auroc, image_f1score, pixel_auroc, pixel_f1score]
         return Evaluator(test_metrics=test_metrics)
 
+    def _resolve_visualizer(self, visualizer: Visualizer | bool) -> Visualizer | None:
+        """Resolve and validate which visualizer to use.
+
+        Args:
+            visualizer: Visualizer configuration
+                - True -> use default visualizer
+                - False -> no visualizer
+                - Visualizer -> use the provided visualizer
+
+        Returns:
+            Configured visualizer
+        """
+        if isinstance(visualizer, Visualizer):
+            return visualizer
+        if isinstance(visualizer, bool):
+            return self.configure_visualizer() if visualizer else None
+        msg = f"Visualizer must be of type Visualizer or bool, got {type(visualizer)}"
+        raise TypeError(msg)
+
+    @classmethod
+    def configure_visualizer(cls) -> ImageVisualizer:
+        """Configure the default visualizer.
+
+        By default, this method returns an ImageVisualizer instance, which is suitable for
+        visualizing image-based anomaly detection results. However, the visualizer can be
+        customized based on your needs - for example, using VideoVisualizer for video data
+        or implementing a custom visualizer for specific visualization requirements.
+
+        Returns:
+            Visualizer: Configured visualizer instance (ImageVisualizer by default).
+
+        Examples:
+            Get default ImageVisualizer:
+
+            >>> visualizer = AnomalibModule.configure_visualizer()
+
+            Create model with VideoVisualizer:
+
+            >>> from custom_module import VideoVisualizer
+            >>> video_visualizer = VideoVisualizer()
+            >>> model = PatchCore(visualizer=video_visualizer)
+
+            Create model with custom visualizer:
+
+            >>> class CustomVisualizer(Visualizer):
+            ...     def __init__(self):
+            ...         super().__init__()
+            ...         # Custom visualization logic
+            >>> custom_visualizer = CustomVisualizer()
+            >>> model = PatchCore(visualizer=custom_visualizer)
+
+            Disable visualization:
+
+            >>> model = PatchCore(visualizer=False)
        """
+        return ImageVisualizer()
+
     @property
     def input_size(self) -> tuple[int, int] | None:
         """Return the effective input size of the model.

src/anomalib/models/image/cfa/lightning_model.py

Lines changed: 10 additions & 2 deletions
@@ -20,6 +20,7 @@
 from anomalib.models.components import AnomalibModule
 from anomalib.post_processing import PostProcessor
 from anomalib.pre_processing import PreProcessor
+from anomalib.visualization import Visualizer
 
 from .loss import CfaLoss
 from .torch_model import CfaModel
@@ -58,11 +59,18 @@ def __init__(
         num_nearest_neighbors: int = 3,
         num_hard_negative_features: int = 3,
         radius: float = 1e-5,
+        # Anomalib's Auxiliary Components
         pre_processor: PreProcessor | bool = True,
-        post_processor: PostProcessor | None = None,
+        post_processor: PostProcessor | bool = True,
         evaluator: Evaluator | bool = True,
+        visualizer: Visualizer | bool = True,
     ) -> None:
-        super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
+        super().__init__(
+            pre_processor=pre_processor,
+            post_processor=post_processor,
+            evaluator=evaluator,
+            visualizer=visualizer,
+        )
         self.model: CfaModel = CfaModel(
             backbone=backbone,
             gamma_c=gamma_c,
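
For model users, the practical effect of this change is simply the extra keywords; a short sketch with `Cfa` (the `Cflow` file below receives the same treatment):

# Sketch: constructing Cfa with the auxiliary-component keywords added in this diff.
from anomalib.models import Cfa

# Keep the default pre-/post-processing, but skip evaluation and visualization,
# e.g. for a quick smoke-test run.
model = Cfa(evaluator=False, visualizer=False)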

src/anomalib/models/image/cflow/lightning_model.py

Lines changed: 9 additions & 2 deletions
@@ -27,6 +27,7 @@
 from anomalib.models.components import AnomalibModule
 from anomalib.post_processing import PostProcessor
 from anomalib.pre_processing import PreProcessor
+from anomalib.visualization import Visualizer
 
 from .torch_model import CflowModel
 from .utils import get_logp, positional_encoding_2d
@@ -71,10 +72,16 @@ def __init__(
         permute_soft: bool = False,
         lr: float = 0.0001,
         pre_processor: PreProcessor | bool = True,
-        post_processor: PostProcessor | None = None,
+        post_processor: PostProcessor | bool = True,
         evaluator: Evaluator | bool = True,
+        visualizer: Visualizer | bool = True,
     ) -> None:
-        super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
+        super().__init__(
+            pre_processor=pre_processor,
+            post_processor=post_processor,
+            evaluator=evaluator,
+            visualizer=visualizer,
+        )
 
         self.model: CflowModel = CflowModel(
             backbone=backbone,
