|
| 1 | +# Copyright (C) 2025 Intel Corporation |
| 2 | +# SPDX-License-Identifier: Apache-2.0 |
| 3 | + |
| 4 | +"""Unit tests for the WinCLIP torch model.""" |
| 5 | + |
| 6 | +import numpy as np |
| 7 | +import pytest |
| 8 | +import torch |
| 9 | +from _pytest.monkeypatch import MonkeyPatch |
| 10 | + |
| 11 | +from anomalib.models.image.anomaly_dino.torch_model import AnomalyDINOModel |
| 12 | + |
| 13 | + |
class TestAnomalyDINOModel:
    """Unit tests covering the AnomalyDINO torch model."""

    @staticmethod
    def test_initialization_defaults() -> None:
        """A default-constructed model uses a dinov2 encoder and starts with an empty memory bank."""
        default_model = AnomalyDINOModel()
        assert default_model.encoder_name.startswith("dinov2")
        assert default_model.memory_bank.numel() == 0

    @staticmethod
    def test_invalid_encoder_name_raises() -> None:
        """Constructing the model with a non-dinov2 encoder name must fail."""
        with pytest.raises(ValueError, match="Encoder must be dinov2"):
            _ = AnomalyDINOModel(encoder_name="resnet50")

    @staticmethod
    def test_fit_raises_without_embeddings() -> None:
        """Calling fit() before any embeddings were gathered must fail."""
        untrained_model = AnomalyDINOModel()
        with pytest.raises(ValueError, match="No embeddings collected"):
            untrained_model.fit()

    @staticmethod
    def test_forward_train_adds_embeddings(monkeypatch: MonkeyPatch) -> None:
        """A forward pass in training mode appends one 2-D tensor to the embedding store."""
        trainer_model = AnomalyDINOModel()
        trainer_model.train()

        stub_features = torch.randn(2, 8, 128)
        monkeypatch.setattr(trainer_model, "extract_features", lambda _: stub_features)

        batch = torch.randn(2, 3, 224, 224)
        result = trainer_model(batch)

        assert torch.is_tensor(result)
        assert result.requires_grad
        assert len(trainer_model.embedding_store) == 1
        assert trainer_model.embedding_store[0].ndim == 2

    @staticmethod
    def test_forward_eval_raises_with_empty_memory_bank(monkeypatch: MonkeyPatch) -> None:
        """Inference must fail loudly when the memory bank holds no reference features."""
        eval_model = AnomalyDINOModel()
        eval_model.eval()

        stub_features = torch.randn(1, 16, 64)
        monkeypatch.setattr(eval_model, "extract_features", lambda _: stub_features)
        # Overwrite the buffer with a zero-row bank to trigger the guard.
        eval_model.register_buffer("memory_bank", torch.empty(0, 64))

        batch = torch.randn(1, 3, 224, 224)
        with pytest.raises(RuntimeError, match="Memory bank is empty"):
            _ = eval_model(batch)

    @staticmethod
    def test_compute_background_masks_runs() -> None:
        """Background-mask computation yields one boolean mask per patch per image."""
        num_images, grid_h, grid_w, feat_dim = 2, 8, 8, 16
        patch_features = np.random.randn(num_images, grid_h * grid_w, feat_dim).astype(np.float32)  # noqa: NPY002
        masks = AnomalyDINOModel.compute_background_masks(patch_features, (grid_h, grid_w))
        assert masks.shape == (num_images, grid_h * grid_w)
        assert masks.dtype == bool

    @staticmethod
    def test_mean_top1p_computation() -> None:
        """mean_top1p of 100 distinct distances is the mean of the single largest one."""
        distances = torch.arange(100.0).reshape(1, 100)
        result = AnomalyDINOModel.mean_top1p(distances)
        assert result.shape == (1, 1)
        assert torch.allclose(result, torch.tensor([[99.0]]))

    @staticmethod
    def test_forward_half_precision_eval(monkeypatch: MonkeyPatch) -> None:
        """Float16 inference stays in float16 end to end (matmul cosine distance path)."""
        half_model = AnomalyDINOModel().half()
        half_model.eval()

        stub_features = torch.randn(1, 16, 64, dtype=torch.float16)
        monkeypatch.setattr(half_model, "extract_features", lambda _: stub_features)
        # NOTE(review): patching "__call__" on the generator *instance* may not
        # intercept generator(x) calls — special methods resolve on the type.
        # Confirm the model invokes .__call__ explicitly, else this patch is a no-op.
        monkeypatch.setattr(half_model.anomaly_map_generator, "__call__", lambda x, __: x)

        half_model.register_buffer("memory_bank", torch.randn(16, 64, dtype=torch.float16))
        half_batch = torch.randn(1, 3, 224, 224, dtype=torch.float16)
        prediction = half_model(half_batch)

        assert hasattr(prediction, "pred_score")
        assert prediction.pred_score.shape == (1, 1)
        # The score must survive the half-precision path without upcasting.
        assert prediction.pred_score.dtype == torch.float16
0 commit comments