Skip to content

Commit c7f6efb

Browse files
committed
adjust parameters
1 parent 879cb39 commit c7f6efb

File tree

3 files changed

+34
-34
lines changed

3 files changed

+34
-34
lines changed

test/common_utils.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -406,7 +406,7 @@ def make_bounding_boxes(
406406
canvas_size=DEFAULT_SIZE,
407407
*,
408408
format=tv_tensors.BoundingBoxFormat.XYXY,
409-
num_objects=1,
409+
num_boxes=1,
410410
dtype=None,
411411
device="cpu",
412412
):
@@ -420,7 +420,7 @@ def sample_position(values, max_value):
420420

421421
dtype = dtype or torch.float32
422422

423-
h, w = [torch.randint(1, s, (num_objects,)) for s in canvas_size]
423+
h, w = [torch.randint(1, s, (num_boxes,)) for s in canvas_size]
424424
y = sample_position(h, canvas_size[0])
425425
x = sample_position(w, canvas_size[1])
426426

@@ -443,11 +443,11 @@ def sample_position(values, max_value):
443443
)
444444

445445

446-
def make_detection_mask(size=DEFAULT_SIZE, *, num_objects=1, dtype=None, device="cpu"):
446+
def make_detection_masks(size=DEFAULT_SIZE, *, num_masks=1, dtype=None, device="cpu"):
447447
"""Make "detection" masks, i.e. (*, N, H, W), where each object is encoded as one of N boolean masks"""
448448
return tv_tensors.Mask(
449449
torch.testing.make_tensor(
450-
(num_objects, *size),
450+
(num_masks, *size),
451451
low=0,
452452
high=2,
453453
dtype=dtype or torch.bool,

test/test_transforms_v2.py

Lines changed: 28 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
freeze_rng_state,
2727
ignore_jit_no_profile_information_warning,
2828
make_bounding_boxes,
29-
make_detection_mask,
29+
make_detection_masks,
3030
make_image,
3131
make_image_pil,
3232
make_image_tensor,
@@ -315,7 +315,7 @@ def _make_transform_sample(transform, *, image_or_video, adapter):
315315
canvas_size=size,
316316
device=device,
317317
),
318-
detection_mask=make_detection_mask(size, device=device),
318+
detection_mask=make_detection_masks(size, device=device),
319319
segmentation_mask=make_segmentation_mask(size, device=device),
320320
int=0,
321321
float=0.0,
@@ -637,7 +637,7 @@ def test_kernel_bounding_boxes(self, format, size, use_max_size, dtype, device):
637637
check_scripted_vs_eager=not isinstance(size, int),
638638
)
639639

640-
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_mask])
640+
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks])
641641
def test_kernel_mask(self, make_mask):
642642
check_kernel(F.resize_mask, make_mask(self.INPUT_SIZE), size=self.OUTPUT_SIZES[-1])
643643

@@ -682,7 +682,7 @@ def test_functional_signature(self, kernel, input_type):
682682
make_image,
683683
make_bounding_boxes,
684684
make_segmentation_mask,
685-
make_detection_mask,
685+
make_detection_masks,
686686
make_video,
687687
],
688688
)
@@ -788,7 +788,7 @@ def test_functional_pil_antialias_warning(self):
788788
make_image,
789789
make_bounding_boxes,
790790
make_segmentation_mask,
791-
make_detection_mask,
791+
make_detection_masks,
792792
make_video,
793793
],
794794
)
@@ -840,7 +840,7 @@ def test_transform_unknown_size_error(self):
840840
make_image,
841841
make_bounding_boxes,
842842
make_segmentation_mask,
843-
make_detection_mask,
843+
make_detection_masks,
844844
make_video,
845845
],
846846
)
@@ -867,7 +867,7 @@ def test_noop(self, size, make_input):
867867
make_image,
868868
make_bounding_boxes,
869869
make_segmentation_mask,
870-
make_detection_mask,
870+
make_detection_masks,
871871
make_video,
872872
],
873873
)
@@ -962,7 +962,7 @@ def test_kernel_bounding_boxes(self, format, dtype, device):
962962
canvas_size=bounding_boxes.canvas_size,
963963
)
964964

965-
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_mask])
965+
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks])
966966
def test_kernel_mask(self, make_mask):
967967
check_kernel(F.horizontal_flip_mask, make_mask())
968968

@@ -1130,7 +1130,7 @@ def test_kernel_bounding_boxes(self, param, value, format, dtype, device):
11301130
check_scripted_vs_eager=not (param == "shear" and isinstance(value, (int, float))),
11311131
)
11321132

1133-
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_mask])
1133+
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks])
11341134
def test_kernel_mask(self, make_mask):
11351135
self._check_kernel(F.affine_mask, make_mask())
11361136

@@ -1412,7 +1412,7 @@ def test_kernel_bounding_boxes(self, format, dtype, device):
14121412
canvas_size=bounding_boxes.canvas_size,
14131413
)
14141414

1415-
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_mask])
1415+
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks])
14161416
def test_kernel_mask(self, make_mask):
14171417
check_kernel(F.vertical_flip_mask, make_mask())
14181418

@@ -1554,7 +1554,7 @@ def test_kernel_bounding_boxes(self, param, value, format, dtype, device):
15541554
**kwargs,
15551555
)
15561556

1557-
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_mask])
1557+
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks])
15581558
def test_kernel_mask(self, make_mask):
15591559
check_kernel(F.rotate_mask, make_mask(), **self._MINIMAL_AFFINE_KWARGS)
15601560

@@ -2044,7 +2044,7 @@ def make_inpt_with_bbox_and_mask(self, make_input):
20442044
sample = {
20452045
"inpt": make_input(size=(H, W), dtype=inpt_dtype),
20462046
"bbox": make_bounding_boxes(canvas_size=(H, W), dtype=bbox_dtype),
2047-
"mask": make_detection_mask(size=(H, W), dtype=mask_dtype),
2047+
"mask": make_detection_masks(size=(H, W), dtype=mask_dtype),
20482048
}
20492049

20502050
return sample, inpt_dtype, bbox_dtype, mask_dtype
@@ -2330,7 +2330,7 @@ def test_get_num_channels(self, kernel, make_input):
23302330
(F._get_size_image_pil, make_image_pil),
23312331
(F.get_size_image, make_image),
23322332
(F.get_size_bounding_boxes, make_bounding_boxes),
2333-
(F.get_size_mask, make_detection_mask),
2333+
(F.get_size_mask, make_detection_masks),
23342334
(F.get_size_mask, make_segmentation_mask),
23352335
(F.get_size_video, make_video),
23362336
],
@@ -2360,15 +2360,15 @@ def test_get_num_frames(self, kernel, make_input):
23602360
("functional", "make_input"),
23612361
[
23622362
(F.get_dimensions, make_bounding_boxes),
2363-
(F.get_dimensions, make_detection_mask),
2363+
(F.get_dimensions, make_detection_masks),
23642364
(F.get_dimensions, make_segmentation_mask),
23652365
(F.get_num_channels, make_bounding_boxes),
2366-
(F.get_num_channels, make_detection_mask),
2366+
(F.get_num_channels, make_detection_masks),
23672367
(F.get_num_channels, make_segmentation_mask),
23682368
(F.get_num_frames, make_image_pil),
23692369
(F.get_num_frames, make_image),
23702370
(F.get_num_frames, make_bounding_boxes),
2371-
(F.get_num_frames, make_detection_mask),
2371+
(F.get_num_frames, make_detection_masks),
23722372
(F.get_num_frames, make_segmentation_mask),
23732373
],
23742374
)
@@ -2617,7 +2617,7 @@ def test_kernel_bounding_boxes(self, format, dtype, device):
26172617
displacement=self._make_displacement(bounding_boxes),
26182618
)
26192619

2620-
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_mask])
2620+
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks])
26212621
def test_kernel_mask(self, make_mask):
26222622
mask = make_mask()
26232623
check_kernel(F.elastic_mask, mask, displacement=self._make_displacement(mask))
@@ -2683,7 +2683,7 @@ def test_correctness(self):
26832683
"img": make_image(),
26842684
"img_tensor": make_image_tensor(),
26852685
"img_pil": make_image_pil(),
2686-
"mask": make_detection_mask(),
2686+
"mask": make_detection_masks(),
26872687
"video": make_video(),
26882688
"bbox": make_bounding_boxes(),
26892689
"str": "str",
@@ -2733,7 +2733,7 @@ def test_kernel_bounding_box(self, kwargs, format, dtype, device):
27332733
bounding_boxes = make_bounding_boxes(self.INPUT_SIZE, format=format, dtype=dtype, device=device)
27342734
check_kernel(F.crop_bounding_boxes, bounding_boxes, format=format, **kwargs)
27352735

2736-
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_mask])
2736+
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks])
27372737
def test_kernel_mask(self, make_mask):
27382738
check_kernel(F.crop_mask, make_mask(self.INPUT_SIZE), **self.MINIMAL_CROP_KWARGS)
27392739

@@ -3448,7 +3448,7 @@ class TestResizedCrop:
34483448
(F.resized_crop_image, make_image),
34493449
(F.resized_crop_bounding_boxes, make_bounding_boxes),
34503450
(F.resized_crop_mask, make_segmentation_mask),
3451-
(F.resized_crop_mask, make_detection_mask),
3451+
(F.resized_crop_mask, make_detection_masks),
34523452
(F.resized_crop_video, make_video),
34533453
],
34543454
)
@@ -3634,7 +3634,7 @@ def test_kernel_bounding_boxes_errors(self, padding_mode):
36343634
padding_mode=padding_mode,
36353635
)
36363636

3637-
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_mask])
3637+
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks])
36383638
def test_kernel_mask(self, make_mask):
36393639
check_kernel(F.pad_mask, make_mask(), padding=[1])
36403640

@@ -3771,7 +3771,7 @@ def test_kernel_bounding_boxes(self, output_size, format):
37713771
check_scripted_vs_eager=not isinstance(output_size, int),
37723772
)
37733773

3774-
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_mask])
3774+
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks])
37753775
def test_kernel_mask(self, make_mask):
37763776
check_kernel(F.center_crop_mask, make_mask(), output_size=self.OUTPUT_SIZES[0])
37773777

@@ -3937,7 +3937,7 @@ def test_kernel_bounding_boxes_error(self):
39373937
coefficients=[0.0] * 8,
39383938
)
39393939

3940-
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_mask])
3940+
@pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks])
39413941
def test_kernel_mask(self, make_mask):
39423942
check_kernel(F.perspective_mask, make_mask(), **self.MINIMAL_KWARGS)
39433943

@@ -4801,7 +4801,7 @@ def test_transform(self, make_input, transform_cls):
48014801
check_sample_input=False,
48024802
)
48034803

4804-
@pytest.mark.parametrize("make_input", [make_bounding_boxes, make_detection_mask])
4804+
@pytest.mark.parametrize("make_input", [make_bounding_boxes, make_detection_masks])
48054805
@pytest.mark.parametrize("transform_cls", [transforms.FiveCrop, transforms.TenCrop])
48064806
def test_transform_error(self, make_input, transform_cls):
48074807
transform = transform_cls(size=self.OUTPUT_SIZE)
@@ -4968,7 +4968,7 @@ class TestRandomZoomOut:
49684968
make_image,
49694969
make_bounding_boxes,
49704970
make_segmentation_mask,
4971-
make_detection_mask,
4971+
make_detection_masks,
49724972
make_video,
49734973
],
49744974
)
@@ -4995,7 +4995,7 @@ def test_transform_error(self):
49954995
make_image,
49964996
make_bounding_boxes,
49974997
make_segmentation_mask,
4998-
make_detection_mask,
4998+
make_detection_masks,
49994999
make_video,
50005000
],
50015001
)
@@ -5388,8 +5388,8 @@ def test__transform(self, mocker):
53885388

53895389
size = (32, 24)
53905390
image = make_image(size)
5391-
bboxes = make_bounding_boxes(format="XYXY", canvas_size=size, num_objects=6)
5392-
masks = make_detection_mask(size, num_objects=6)
5391+
bboxes = make_bounding_boxes(format="XYXY", canvas_size=size, num_boxes=6)
5392+
masks = make_detection_masks(size, num_masks=6)
53935393

53945394
sample = [image, bboxes, masks]
53955395

test/test_transforms_v2_utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import torch
55

66
import torchvision.transforms.v2._utils
7-
from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_mask, make_image
7+
from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_masks, make_image
88

99
from torchvision import tv_tensors
1010
from torchvision.transforms.v2._utils import has_all, has_any
@@ -13,7 +13,7 @@
1313

1414
IMAGE = make_image(DEFAULT_SIZE, color_space="RGB")
1515
BOUNDING_BOX = make_bounding_boxes(DEFAULT_SIZE, format=tv_tensors.BoundingBoxFormat.XYXY)
16-
MASK = make_detection_mask(DEFAULT_SIZE)
16+
MASK = make_detection_masks(DEFAULT_SIZE)
1717

1818

1919
@pytest.mark.parametrize(

0 commit comments

Comments
 (0)