import numpy as np
import pytest
import torch
+ import torch.fx
from common_utils import needs_cuda, cpu_and_gpu, assert_equal
from PIL import Image
from torch import nn, Tensor
from torch.autograd import gradcheck
from torch.nn.modules.utils import _pair
from torchvision import models, ops
+ from torchvision.models.feature_extraction import get_graph_node_names
+
+
+ class RoIOpTesterModuleWrapper(nn.Module):
+     def __init__(self, obj):
+         super().__init__()
+         self.layer = obj
+         self.n_inputs = 2
+
+     def forward(self, a, b):
+         self.layer(a, b)
+
+
+ class MultiScaleRoIAlignModuleWrapper(nn.Module):
+     def __init__(self, obj):
+         super().__init__()
+         self.layer = obj
+         self.n_inputs = 3
+
+     def forward(self, a, b, c):
+         self.layer(a, b, c)
+
+
+ class DeformConvModuleWrapper(nn.Module):
+     def __init__(self, obj):
+         super().__init__()
+         self.layer = obj
+         self.n_inputs = 3
+
+     def forward(self, a, b, c):
+         self.layer(a, b, c)
+
+
+ class StochasticDepthWrapper(nn.Module):
+     def __init__(self, obj):
+         super().__init__()
+         self.layer = obj
+         self.n_inputs = 1
+
+     def forward(self, a):
+         self.layer(a)
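These wrapper classes give `get_graph_node_names` a plain `nn.Module` with a known input count (`n_inputs`), so each op can be checked as a single leaf node in the traced FX graph; the forwards deliberately discard the op's output because only the graph structure matters here. A minimal sketch of how a wrapper is exercised, assuming `RoIOpTesterModuleWrapper` from above is in scope and that torchvision registers `RoIPool` as an FX leaf (which is exactly what the new tests assert):

```python
import torch
from torchvision import ops
from torchvision.models.feature_extraction import get_graph_node_names

# Wrap the op so the tracer sees a module with two inputs (features, rois).
wrapper = RoIOpTesterModuleWrapper(ops.RoIPool((5, 5), 1.0))
train_nodes, eval_nodes = get_graph_node_names(wrapper)
# For a leaf op we expect one node per input plus the op call itself,
# e.g. something along the lines of ['a', 'b', 'layer'].
print(train_nodes)
```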


class RoIOpTester(ABC):
@@ -46,6 +88,15 @@ def test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, **kwar
        tol = 1e-3 if (x_dtype is torch.half or rois_dtype is torch.half) else 1e-5
        torch.testing.assert_close(gt_y.to(y), y, rtol=tol, atol=tol)

+     @pytest.mark.parametrize("device", cpu_and_gpu())
+     def test_is_leaf_node(self, device):
+         op_obj = self.make_obj(wrap=True).to(device=device)
+         graph_node_names = get_graph_node_names(op_obj)
+
+         assert len(graph_node_names) == 2
+         assert len(graph_node_names[0]) == len(graph_node_names[1])
+         assert len(graph_node_names[0]) == 1 + op_obj.n_inputs
+
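`get_graph_node_names` traces the wrapped module twice, once in train and once in eval mode, and returns a pair of node-name lists; placeholders and op calls get names, while FX's terminal `output` node is not reported. The three assertions thus check that both mode-specific lists exist, that the graph is identical across modes, and that the op appears as exactly one call node next to its input placeholders, i.e. `1 + n_inputs` names in total. A toy `torch.fx` trace (illustrative only, not part of the test suite) makes the arithmetic concrete:

```python
import torch.fx
from torch import nn

class TwoInput(nn.Module):
    def forward(self, a, b):
        return a + b

graph = torch.fx.symbolic_trace(TwoInput()).graph
# Two placeholders and one call, plus the output node that
# get_graph_node_names does not count:
print([n.op for n in graph.nodes])
# ['placeholder', 'placeholder', 'call_function', 'output']
```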
    @pytest.mark.parametrize("seed", range(10))
    @pytest.mark.parametrize("device", cpu_and_gpu())
    @pytest.mark.parametrize("contiguous", (True, False))
@@ -91,6 +142,10 @@ def _helper_boxes_shape(self, func):
    def fn(*args, **kwargs):
        pass

+     @abstractmethod
+     def make_obj(*args, **kwargs):
+         pass
+
    @abstractmethod
    def get_script_fn(*args, **kwargs):
        pass
@@ -104,6 +159,10 @@ class TestRoiPool(RoIOpTester):
    def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs):
        return ops.RoIPool((pool_h, pool_w), spatial_scale)(x, rois)

+     def make_obj(self, pool_h=5, pool_w=5, spatial_scale=1, wrap=False):
+         obj = ops.RoIPool((pool_h, pool_w), spatial_scale)
+         return RoIOpTesterModuleWrapper(obj) if wrap else obj
+
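`make_obj` doubles as a factory for both call styles: `wrap=False` hands back the bare op for the forward/backward tests, while `wrap=True` returns the FX-traceable wrapper for the leaf-node check. A hypothetical sketch of the two modes, assuming the wrapper classes above are importable:

```python
from torchvision import ops

bare = ops.RoIPool((5, 5), 1.0)           # what wrap=False returns
wrapped = RoIOpTesterModuleWrapper(bare)  # what wrap=True returns
assert wrapped.n_inputs == 2              # (features, rois)
```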
    def get_script_fn(self, rois, pool_size):
        scripted = torch.jit.script(ops.roi_pool)
        return lambda x: scripted(x, rois, pool_size)
@@ -144,6 +203,10 @@ class TestPSRoIPool(RoIOpTester):
    def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs):
        return ops.PSRoIPool((pool_h, pool_w), 1)(x, rois)

+     def make_obj(self, pool_h=5, pool_w=5, spatial_scale=1, wrap=False):
+         obj = ops.PSRoIPool((pool_h, pool_w), spatial_scale)
+         return RoIOpTesterModuleWrapper(obj) if wrap else obj
+
    def get_script_fn(self, rois, pool_size):
        scripted = torch.jit.script(ops.ps_roi_pool)
        return lambda x: scripted(x, rois, pool_size)
@@ -223,6 +286,12 @@ def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, aligne
            (pool_h, pool_w), spatial_scale=spatial_scale, sampling_ratio=sampling_ratio, aligned=aligned
        )(x, rois)

+     def make_obj(self, pool_h=5, pool_w=5, spatial_scale=1, sampling_ratio=-1, aligned=False, wrap=False):
+         obj = ops.RoIAlign(
+             (pool_h, pool_w), spatial_scale=spatial_scale, sampling_ratio=sampling_ratio, aligned=aligned
+         )
+         return RoIOpTesterModuleWrapper(obj) if wrap else obj
+
    def get_script_fn(self, rois, pool_size):
        scripted = torch.jit.script(ops.roi_align)
        return lambda x: scripted(x, rois, pool_size)
@@ -374,6 +443,10 @@ class TestPSRoIAlign(RoIOpTester):
    def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs):
        return ops.PSRoIAlign((pool_h, pool_w), spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)(x, rois)

+     def make_obj(self, pool_h=5, pool_w=5, spatial_scale=1, sampling_ratio=-1, wrap=False):
+         obj = ops.PSRoIAlign((pool_h, pool_w), spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
+         return RoIOpTesterModuleWrapper(obj) if wrap else obj
+
    def get_script_fn(self, rois, pool_size):
        scripted = torch.jit.script(ops.ps_roi_align)
        return lambda x: scripted(x, rois, pool_size)
@@ -422,12 +495,18 @@ def test_boxes_shape(self):


class TestMultiScaleRoIAlign:
+     def make_obj(self, fmap_names=None, output_size=(7, 7), sampling_ratio=2, wrap=False):
+         if fmap_names is None:
+             fmap_names = ["0"]
+         obj = ops.poolers.MultiScaleRoIAlign(fmap_names, output_size, sampling_ratio)
+         return MultiScaleRoIAlignModuleWrapper(obj) if wrap else obj
+
    def test_msroialign_repr(self):
        fmap_names = ["0"]
        output_size = (7, 7)
        sampling_ratio = 2
        # Pass mock feature map names
-         t = ops.poolers.MultiScaleRoIAlign(fmap_names, output_size, sampling_ratio)
+         t = self.make_obj(fmap_names, output_size, sampling_ratio, wrap=False)

        # Check integrity of object __repr__ attribute
        expected_string = (
@@ -436,6 +515,15 @@ def test_msroialign_repr(self):
        )
        assert repr(t) == expected_string

+     @pytest.mark.parametrize("device", cpu_and_gpu())
+     def test_is_leaf_node(self, device):
+         op_obj = self.make_obj(wrap=True).to(device=device)
+         graph_node_names = get_graph_node_names(op_obj)
+
+         assert len(graph_node_names) == 2
+         assert len(graph_node_names[0]) == len(graph_node_names[1])
+         assert len(graph_node_names[0]) == 1 + op_obj.n_inputs
+
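`MultiScaleRoIAlign` is the one op here whose forward takes three inputs, hence `n_inputs = 3` in its wrapper: a dict of named feature maps, per-image boxes, and the original image sizes. A sketch with illustrative values (the test builds the op via `ops.poolers.MultiScaleRoIAlign`, the same class exported as `ops.MultiScaleRoIAlign`):

```python
import torch
from torchvision import ops

m = ops.MultiScaleRoIAlign(["0"], output_size=(7, 7), sampling_ratio=2)
feats = {"0": torch.rand(1, 5, 64, 64)}           # feature maps keyed by name
boxes = [torch.tensor([[0.0, 0.0, 10.0, 10.0]])]  # one box for one image
image_sizes = [(512, 512)]                        # original (H, W) per image
out = m(feats, boxes, image_sizes)                # -> shape (1, 5, 7, 7)
```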

class TestNMS:
    def _reference_nms(self, boxes, scores, iou_threshold):
@@ -693,6 +781,21 @@ def get_fn_args(self, device, contiguous, batch_sz, dtype):

        return x, weight, offset, mask, bias, stride, pad, dilation

+     def make_obj(self, in_channels=6, out_channels=2, kernel_size=(3, 2), groups=2, wrap=False):
+         obj = ops.DeformConv2d(
+             in_channels, out_channels, kernel_size, stride=(2, 1), padding=(1, 0), dilation=(2, 1), groups=groups
+         )
+         return DeformConvModuleWrapper(obj) if wrap else obj
+
+     @pytest.mark.parametrize("device", cpu_and_gpu())
+     def test_is_leaf_node(self, device):
+         op_obj = self.make_obj(wrap=True).to(device=device)
+         graph_node_names = get_graph_node_names(op_obj)
+
+         assert len(graph_node_names) == 2
+         assert len(graph_node_names[0]) == len(graph_node_names[1])
+         assert len(graph_node_names[0]) == 1 + op_obj.n_inputs
+
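`DeformConv2d` likewise runs on three inputs (`x`, `offset`, `mask`), matching `DeformConvModuleWrapper.n_inputs = 3`. A sketch of mutually consistent shapes, using a simpler 3x3 kernel with padding 1 than the one `make_obj` builds (illustrative only):

```python
import torch
from torchvision import ops

conv = ops.DeformConv2d(3, 3, kernel_size=3, padding=1)
x = torch.rand(1, 3, 10, 10)
offset = torch.rand(1, 2 * 3 * 3, 10, 10)  # 2 coordinates per kernel sample
mask = torch.rand(1, 3 * 3, 10, 10)        # one modulation scalar per sample
out = conv(x, offset, mask)                # -> shape (1, 3, 10, 10)
```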
    @pytest.mark.parametrize("device", cpu_and_gpu())
    @pytest.mark.parametrize("contiguous", (True, False))
    @pytest.mark.parametrize("batch_sz", (0, 33))
@@ -705,9 +808,9 @@ def test_forward(self, device, contiguous, batch_sz, dtype=None):
        groups = 2
        tol = 2e-3 if dtype is torch.half else 1e-5

-         layer = ops.DeformConv2d(
-             in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups
-         ).to(device=x.device, dtype=dtype)
+         layer = self.make_obj(in_channels, out_channels, kernel_size, groups, wrap=False).to(
+             device=x.device, dtype=dtype
+         )
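Note that `make_obj` hard-codes `stride=(2, 1)`, `padding=(1, 0)` and `dilation=(2, 1)`, whereas the removed code threaded the local `stride`/`padding`/`dilation` variables through explicitly; the refactored test therefore relies on `get_fn_args` producing tensors consistent with exactly those hyper-parameters.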
        res = layer(x, offset, mask)

        weight = layer.weight.data
@@ -1200,6 +1303,20 @@ def test_stochastic_depth(self, seed, mode, p):
        elif p == 1:
            assert out.equal(torch.zeros_like(x))

+     def make_obj(self, p, mode, wrap=False):
+         obj = ops.StochasticDepth(p, mode)
+         return StochasticDepthWrapper(obj) if wrap else obj
+
+     @pytest.mark.parametrize("p", (0, 1))
+     @pytest.mark.parametrize("mode", ["batch", "row"])
+     def test_is_leaf_node(self, p, mode):
+         op_obj = self.make_obj(p, mode, wrap=True)
+         graph_node_names = get_graph_node_names(op_obj)
+
+         assert len(graph_node_names) == 2
+         assert len(graph_node_names[0]) == len(graph_node_names[1])
+         assert len(graph_node_names[0]) == 1 + op_obj.n_inputs
+
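`StochasticDepth` behaves differently in train and eval mode, which makes the equal-length assertion meaningful here: the traced graph must be identical across modes even though the runtime behavior is not. A minimal sketch of the two parametrized extremes (illustrative only):

```python
import torch
from torchvision import ops

x = torch.rand(4, 3, 8, 8)
# p=0 is the identity; p=1 zeroes everything while in training mode.
assert ops.StochasticDepth(p=0.0, mode="row")(x).equal(x)
assert ops.StochasticDepth(p=1.0, mode="row").train()(x).eq(0).all()
```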

class TestUtils:
    @pytest.mark.parametrize("norm_layer", [None, nn.BatchNorm2d, nn.LayerNorm])