Commit b219315

Merge branch 'main' into malfet-patch-1
2 parents f29ec8e + 7f328e1 commit b219315

2 files changed: +69 -87 lines


test/test_models.py

Lines changed: 40 additions & 57 deletions
@@ -10,7 +10,6 @@
 import torch
 import torch.fx
 import torch.nn as nn
-import torchvision
 from _utils_internal import get_relative_path
 from common_utils import map_nested_tensor_object, freeze_rng_state, set_rng_seed, cpu_and_gpu, needs_cuda
 from torchvision import models
@@ -19,29 +18,9 @@
 ACCEPT = os.getenv("EXPECTTEST_ACCEPT", "0") == "1"
 
 
-def get_available_classification_models():
+def get_models_from_module(module):
     # TODO add a registration mechanism to torchvision.models
-    return [k for k, v in models.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]
-
-
-def get_available_segmentation_models():
-    # TODO add a registration mechanism to torchvision.models
-    return [k for k, v in models.segmentation.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]
-
-
-def get_available_detection_models():
-    # TODO add a registration mechanism to torchvision.models
-    return [k for k, v in models.detection.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]
-
-
-def get_available_video_models():
-    # TODO add a registration mechanism to torchvision.models
-    return [k for k, v in models.video.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]
-
-
-def get_available_quantizable_models():
-    # TODO add a registration mechanism to torchvision.models
-    return [k for k, v in models.quantization.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]
+    return [v for k, v in module.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]
 
 
 def _get_expected_file(name=None):
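For context, the refactor above parametrizes the tests directly over builder callables instead of name strings. A minimal standalone sketch (the test name and assertion here are illustrative, not part of the commit) of why the two forms are interchangeable: each collected callable still carries the old string key as its __name__.

import pytest
from torchvision import models


def get_models_from_module(module):
    # Public, lowercase callables in the module namespace are treated as model builders.
    return [v for k, v in module.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


@pytest.mark.parametrize("model_fn", get_models_from_module(models))
def test_builder_name_roundtrip(model_fn):
    # The builder's __name__ ("resnet18", "vgg16", ...) is the key the removed
    # get_available_*_models() helpers used to return as a string.
    assert getattr(models, model_fn.__name__) is model_fn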
@@ -314,20 +293,20 @@ def _make_sliced_model(model, stop_layer):
     return new_model
 
 
-@pytest.mark.parametrize("model_name", ["densenet121", "densenet169", "densenet201", "densenet161"])
-def test_memory_efficient_densenet(model_name):
+@pytest.mark.parametrize("model_fn", [models.densenet121, models.densenet169, models.densenet201, models.densenet161])
+def test_memory_efficient_densenet(model_fn):
     input_shape = (1, 3, 300, 300)
     x = torch.rand(input_shape)
 
-    model1 = models.__dict__[model_name](num_classes=50, memory_efficient=True)
+    model1 = model_fn(num_classes=50, memory_efficient=True)
     params = model1.state_dict()
     num_params = sum([x.numel() for x in model1.parameters()])
     model1.eval()
     out1 = model1(x)
     out1.sum().backward()
     num_grad = sum([x.grad.numel() for x in model1.parameters() if x.grad is not None])
 
-    model2 = models.__dict__[model_name](num_classes=50, memory_efficient=False)
+    model2 = model_fn(num_classes=50, memory_efficient=False)
     model2.load_state_dict(params)
     model2.eval()
     out2 = model2(x)
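The DenseNet change keeps the test's shape: build the model twice with only memory_efficient flipped, share one state_dict, and confirm the two variants behave the same. The pattern in isolation (a rough sketch using densenet121 directly; shapes and tolerance are illustrative, not the test's exact values):

import torch
from torchvision import models

x = torch.rand(1, 3, 224, 224)
m1 = models.densenet121(num_classes=10, memory_efficient=True).eval()
m2 = models.densenet121(num_classes=10, memory_efficient=False).eval()
m2.load_state_dict(m1.state_dict())  # same weights, different checkpointing strategy
with torch.no_grad():
    assert torch.allclose(m1(x), m2(x), atol=1e-6)  # outputs should match up to numerical noise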
@@ -344,7 +323,7 @@ def test_memory_efficient_densenet(model_name):
 @pytest.mark.parametrize("dilate_layer_4", (True, False))
 def test_resnet_dilation(dilate_layer_2, dilate_layer_3, dilate_layer_4):
     # TODO improve tests to also check that each layer has the right dimensionality
-    model = models.__dict__["resnet50"](replace_stride_with_dilation=(dilate_layer_2, dilate_layer_3, dilate_layer_4))
+    model = models.resnet50(replace_stride_with_dilation=(dilate_layer_2, dilate_layer_3, dilate_layer_4))
     model = _make_sliced_model(model, stop_layer="layer4")
     model.eval()
     x = torch.rand(1, 3, 224, 224)
@@ -354,22 +333,22 @@ def test_resnet_dilation(dilate_layer_2, dilate_layer_3, dilate_layer_4):
 
 
 def test_mobilenet_v2_residual_setting():
-    model = models.__dict__["mobilenet_v2"](inverted_residual_setting=[[1, 16, 1, 1], [6, 24, 2, 2]])
+    model = models.mobilenet_v2(inverted_residual_setting=[[1, 16, 1, 1], [6, 24, 2, 2]])
     model.eval()
     x = torch.rand(1, 3, 224, 224)
     out = model(x)
     assert out.shape[-1] == 1000
 
 
-@pytest.mark.parametrize("model_name", ["mobilenet_v2", "mobilenet_v3_large", "mobilenet_v3_small"])
-def test_mobilenet_norm_layer(model_name):
-    model = models.__dict__[model_name]()
+@pytest.mark.parametrize("model_fn", [models.mobilenet_v2, models.mobilenet_v3_large, models.mobilenet_v3_small])
+def test_mobilenet_norm_layer(model_fn):
+    model = model_fn()
     assert any(isinstance(x, nn.BatchNorm2d) for x in model.modules())
 
     def get_gn(num_channels):
         return nn.GroupNorm(32, num_channels)
 
-    model = models.__dict__[model_name](norm_layer=get_gn)
+    model = model_fn(norm_layer=get_gn)
     assert not (any(isinstance(x, nn.BatchNorm2d) for x in model.modules()))
     assert any(isinstance(x, nn.GroupNorm) for x in model.modules())
 
@@ -478,18 +457,19 @@ def test_generalizedrcnn_transform_repr():
     assert t.__repr__() == expected_string
 
 
-@pytest.mark.parametrize("model_name", get_available_classification_models())
+@pytest.mark.parametrize("model_fn", get_models_from_module(models))
 @pytest.mark.parametrize("dev", cpu_and_gpu())
-def test_classification_model(model_name, dev):
+def test_classification_model(model_fn, dev):
     set_rng_seed(0)
     defaults = {
         "num_classes": 50,
         "input_shape": (1, 3, 224, 224),
     }
+    model_name = model_fn.__name__
     kwargs = {**defaults, **_model_params.get(model_name, {})}
     input_shape = kwargs.pop("input_shape")
 
-    model = models.__dict__[model_name](**kwargs)
+    model = model_fn(**kwargs)
     model.eval().to(device=dev)
     # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
     x = torch.rand(input_shape).to(device=dev)
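The only genuinely new line in the rewritten test bodies is model_name = model_fn.__name__, which keeps the per-model override table (_model_params) working since it is still keyed by name. A small self-contained sketch of that defaults-plus-overrides merge (the dictionary contents below are made up for illustration):

defaults = {"num_classes": 50, "input_shape": (1, 3, 224, 224)}
_model_params = {"some_model": {"input_shape": (1, 3, 299, 299)}}  # hypothetical override table

model_name = "some_model"
kwargs = {**defaults, **_model_params.get(model_name, {})}  # right-hand dict wins on conflicts
input_shape = kwargs.pop("input_shape")                     # stripped before calling the builder
print(kwargs)       # {'num_classes': 50}
print(input_shape)  # (1, 3, 299, 299)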
@@ -510,19 +490,20 @@ def test_classification_model(model_name, dev):
     _check_input_backprop(model, x)
 
 
-@pytest.mark.parametrize("model_name", get_available_segmentation_models())
+@pytest.mark.parametrize("model_fn", get_models_from_module(models.segmentation))
 @pytest.mark.parametrize("dev", cpu_and_gpu())
-def test_segmentation_model(model_name, dev):
+def test_segmentation_model(model_fn, dev):
     set_rng_seed(0)
     defaults = {
         "num_classes": 10,
         "pretrained_backbone": False,
         "input_shape": (1, 3, 32, 32),
     }
+    model_name = model_fn.__name__
     kwargs = {**defaults, **_model_params.get(model_name, {})}
     input_shape = kwargs.pop("input_shape")
 
-    model = models.segmentation.__dict__[model_name](**kwargs)
+    model = model_fn(**kwargs)
     model.eval().to(device=dev)
     # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
     x = torch.rand(input_shape).to(device=dev)
@@ -571,19 +552,20 @@ def check_out(out):
     _check_input_backprop(model, x)
 
 
-@pytest.mark.parametrize("model_name", get_available_detection_models())
+@pytest.mark.parametrize("model_fn", get_models_from_module(models.detection))
 @pytest.mark.parametrize("dev", cpu_and_gpu())
-def test_detection_model(model_name, dev):
+def test_detection_model(model_fn, dev):
     set_rng_seed(0)
     defaults = {
         "num_classes": 50,
         "pretrained_backbone": False,
         "input_shape": (3, 300, 300),
     }
+    model_name = model_fn.__name__
     kwargs = {**defaults, **_model_params.get(model_name, {})}
     input_shape = kwargs.pop("input_shape")
 
-    model = models.detection.__dict__[model_name](**kwargs)
+    model = model_fn(**kwargs)
     model.eval().to(device=dev)
     # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
     x = torch.rand(input_shape).to(device=dev)
@@ -667,10 +649,10 @@ def compute_mean_std(tensor):
     _check_input_backprop(model, model_input)
 
 
-@pytest.mark.parametrize("model_name", get_available_detection_models())
-def test_detection_model_validation(model_name):
+@pytest.mark.parametrize("model_fn", get_models_from_module(models.detection))
+def test_detection_model_validation(model_fn):
     set_rng_seed(0)
-    model = models.detection.__dict__[model_name](num_classes=50, pretrained_backbone=False)
+    model = model_fn(num_classes=50, pretrained_backbone=False)
     input_shape = (3, 300, 300)
     x = [torch.rand(input_shape)]
 
@@ -696,14 +678,15 @@ def test_detection_model_validation(model_name):
         model(x, targets=targets)
 
 
-@pytest.mark.parametrize("model_name", get_available_video_models())
+@pytest.mark.parametrize("model_fn", get_models_from_module(models.video))
 @pytest.mark.parametrize("dev", cpu_and_gpu())
-def test_video_model(model_name, dev):
+def test_video_model(model_fn, dev):
     # the default input shape is
     # bs * num_channels * clip_len * h * w
     input_shape = (1, 3, 4, 112, 112)
+    model_name = model_fn.__name__
     # test both basicblock and Bottleneck
-    model = models.video.__dict__[model_name](num_classes=50)
+    model = model_fn(num_classes=50)
     model.eval().to(device=dev)
     # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
     x = torch.rand(input_shape).to(device=dev)
@@ -727,20 +710,21 @@ def test_video_model(model_name, dev):
     ),
     reason="This Pytorch Build has not been built with fbgemm and qnnpack",
 )
-@pytest.mark.parametrize("model_name", get_available_quantizable_models())
-def test_quantized_classification_model(model_name):
+@pytest.mark.parametrize("model_fn", get_models_from_module(models.quantization))
+def test_quantized_classification_model(model_fn):
     set_rng_seed(0)
     defaults = {
         "num_classes": 5,
         "input_shape": (1, 3, 224, 224),
         "pretrained": False,
         "quantize": True,
     }
+    model_name = model_fn.__name__
     kwargs = {**defaults, **_model_params.get(model_name, {})}
     input_shape = kwargs.pop("input_shape")
 
     # First check if quantize=True provides models that can run with input data
-    model = torchvision.models.quantization.__dict__[model_name](**kwargs)
+    model = model_fn(**kwargs)
     model.eval()
     x = torch.rand(input_shape)
     out = model(x)
@@ -753,7 +737,7 @@ def test_quantized_classification_model(model_name):
 
     kwargs["quantize"] = False
     for eval_mode in [True, False]:
-        model = torchvision.models.quantization.__dict__[model_name](**kwargs)
+        model = model_fn(**kwargs)
         if eval_mode:
             model.eval()
             model.qconfig = torch.quantization.default_qconfig
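For readers unfamiliar with the eager-mode path the quantize=False branch exercises: the model gets a qconfig, observers are inserted, a calibration pass runs, and modules are swapped for quantized kernels. A generic sketch of that workflow on a toy module (standard torch.quantization usage under the fbgemm/qnnpack assumption checked by the skipif above, not the test's exact code):

import torch
import torch.nn as nn


class TinyNet(nn.Module):
    # Stand-in for a torchvision quantizable model; quant/dequant stubs mark the int8 region.
    def __init__(self):
        super().__init__()
        self.quant = torch.quantization.QuantStub()
        self.fc = nn.Linear(8, 4)
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, x):
        return self.dequant(self.fc(self.quant(x)))


model = TinyNet().eval()
model.qconfig = torch.quantization.default_qconfig  # choose observers / quantization scheme
torch.quantization.prepare(model, inplace=True)     # insert observers
model(torch.rand(2, 8))                             # calibration pass on representative data
torch.quantization.convert(model, inplace=True)     # swap float modules for quantized ones
print(type(model.fc))                               # a quantized Linear after conversion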
@@ -777,14 +761,13 @@ def test_quantized_classification_model(model_name):
         raise AssertionError(f"model cannot be scripted. Traceback = {str(tb)}") from e
 
 
-@pytest.mark.parametrize("model_name", get_available_detection_models())
-def test_detection_model_trainable_backbone_layers(model_name):
+@pytest.mark.parametrize("model_fn", get_models_from_module(models.detection))
+def test_detection_model_trainable_backbone_layers(model_fn):
+    model_name = model_fn.__name__
     max_trainable = _model_tests_values[model_name]["max_trainable"]
     n_trainable_params = []
     for trainable_layers in range(0, max_trainable + 1):
-        model = torchvision.models.detection.__dict__[model_name](
-            pretrained=False, pretrained_backbone=True, trainable_backbone_layers=trainable_layers
-        )
+        model = model_fn(pretrained=False, pretrained_backbone=True, trainable_backbone_layers=trainable_layers)
 
         n_trainable_params.append(len([p for p in model.parameters() if p.requires_grad]))
     assert n_trainable_params == _model_tests_values[model_name]["n_trn_params_per_layer"]

test/test_prototype_models.py

Lines changed: 29 additions & 30 deletions
@@ -1,15 +1,28 @@
+import importlib
 import os
 
 import pytest
+import test_models as TM
 import torch
-from common_utils import set_rng_seed, cpu_and_gpu
-from test_models import _assert_expected, _model_params
-from torchvision import models as original_models
+from common_utils import cpu_and_gpu
 from torchvision.prototype import models
 
 
-def get_available_classification_models():
-    return [k for k, v in models.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]
+def _get_original_model(model_fn):
+    original_module_name = model_fn.__module__.replace(".prototype", "")
+    module = importlib.import_module(original_module_name)
+    return module.__dict__[model_fn.__name__]
+
+
+def _build_model(fn, **kwargs):
+    try:
+        model = fn(**kwargs)
+    except ValueError as e:
+        msg = str(e)
+        if "No checkpoint is available" in msg:
+            pytest.skip(msg)
+        raise e
+    return model.eval()
 
 
 def test_get_weight():
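The new _get_original_model helper leans on the fact that every prototype builder lives in a module whose path mirrors the stable one, so stripping ".prototype" from __module__ and importing the result lands on the original factory. A short illustration (resnet50 is just an example, and this assumes the prototype namespace as it existed at the time of this commit):

import importlib
from torchvision.prototype import models

model_fn = models.resnet50
# e.g. "torchvision.prototype.models.resnet" -> "torchvision.models.resnet"
original_module_name = model_fn.__module__.replace(".prototype", "")
module = importlib.import_module(original_module_name)
original_fn = module.__dict__[model_fn.__name__]
print(original_fn.__module__, original_fn.__name__)  # torchvision.models.resnet resnet50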
@@ -18,44 +31,30 @@ def test_get_weight():
     assert models._api.get_weight(fn, weight_name) == models.ResNet50Weights.ImageNet1K_RefV2
 
 
-@pytest.mark.parametrize("model_name", get_available_classification_models())
+@pytest.mark.parametrize("model_fn", TM.get_models_from_module(models))
 @pytest.mark.parametrize("dev", cpu_and_gpu())
 @pytest.mark.skipif(os.getenv("PYTORCH_TEST_WITH_PROTOTYPE", "0") == "0", reason="Prototype code tests are disabled")
-def test_classification_model(model_name, dev):
-    set_rng_seed(0)
-    defaults = {
-        "num_classes": 50,
-        "input_shape": (1, 3, 224, 224),
-    }
-    kwargs = {**defaults, **_model_params.get(model_name, {})}
-    input_shape = kwargs.pop("input_shape")
-    model = models.__dict__[model_name](**kwargs)
-    model.eval().to(device=dev)
-    x = torch.rand(input_shape).to(device=dev)
-    out = model(x)
-    _assert_expected(out.cpu(), model_name, prec=0.1)
-    assert out.shape[-1] == 50
+def test_classification_model(model_fn, dev):
+    TM.test_classification_model(model_fn, dev)
 
 
-@pytest.mark.parametrize("model_name", get_available_classification_models())
+@pytest.mark.parametrize("model_fn", TM.get_models_from_module(models))
 @pytest.mark.parametrize("dev", cpu_and_gpu())
 @pytest.mark.skipif(os.getenv("PYTORCH_TEST_WITH_PROTOTYPE", "0") == "0", reason="Prototype code tests are disabled")
-def test_old_vs_new_classification_factory(model_name, dev):
+def test_old_vs_new_classification_factory(model_fn, dev):
     defaults = {
         "pretrained": True,
         "input_shape": (1, 3, 224, 224),
     }
-    kwargs = {**defaults, **_model_params.get(model_name, {})}
+    model_name = model_fn.__name__
+    kwargs = {**defaults, **TM._model_params.get(model_name, {})}
     input_shape = kwargs.pop("input_shape")
-    model_old = original_models.__dict__[model_name](**kwargs)
-    model_old.eval().to(device=dev)
     x = torch.rand(input_shape).to(device=dev)
-    out_old = model_old(x)
+
     # compare with new model builder parameterized in the old fashion way
-    model_new = models.__dict__[model_name](**kwargs)
-    model_new.eval().to(device=dev)
-    out_new = model_new(x)
-    torch.testing.assert_close(out_new, out_old, rtol=0.0, atol=0.0, check_dtype=False)
+    model_old = _build_model(_get_original_model(model_fn), **kwargs).to(device=dev)
+    model_new = _build_model(model_fn, **kwargs).to(device=dev)
+    torch.testing.assert_close(model_new(x), model_old(x), rtol=0.0, atol=0.0, check_dtype=False)
 
 
 def test_smoke():
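One detail of the rewritten old-vs-new test worth spelling out: rtol=0.0 and atol=0.0 turn assert_close into an exact-equality check (dtype aside), so with pretrained=True the old and new factories must load the same weights and produce identical outputs on the same input. A tiny illustration of how strict that is (values here are arbitrary):

import torch

a = torch.tensor([1.0, 2.0, 3.0])
torch.testing.assert_close(a, a.clone(), rtol=0.0, atol=0.0)  # passes: exact match
try:
    torch.testing.assert_close(a, a + 1e-7, rtol=0.0, atol=0.0)
except AssertionError:
    print("any difference at all fails the check")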