Commit 98e8eb8

update unit test
1 parent 7fb6c3e commit 98e8eb8

File tree

1 file changed (+4, -36 lines)

tests/torchtune/models/clip/test_clip_image_transform.py

Lines changed: 4 additions & 36 deletions
@@ -37,17 +37,6 @@ class TestCLIPImageTransform:
                 "expected_tile_max": [1.0, 1.0],
                 "expected_tile_min": [0.0, 0.0],
                 "expected_aspect_ratio": [1, 2],
-                "pad_max_tiles": False,
-            },
-            {
-                "image_size": (100, 400, 3),
-                "expected_shape": torch.Size([4, 3, 224, 224]),
-                "resize_to_max_canvas": False,
-                "expected_tile_means": [0.2230, 0.1763, 0.0, 0.0],
-                "expected_tile_max": [1.0, 1.0, 0.0, 0.0],
-                "expected_tile_min": [0.0, 0.0, 0.0, 0.0],
-                "expected_aspect_ratio": [1, 2],
-                "pad_max_tiles": True,
             },
             {
                 "image_size": (1000, 300, 3),
@@ -57,7 +46,6 @@ class TestCLIPImageTransform:
                 "expected_tile_max": [0.9705, 0.9694, 0.9521, 0.9314],
                 "expected_tile_min": [0.0353, 0.0435, 0.0528, 0.0],
                 "expected_aspect_ratio": [4, 1],
-                "pad_max_tiles": False,
             },
             {
                 "image_size": (200, 200, 3),
@@ -67,7 +55,6 @@ class TestCLIPImageTransform:
                 "expected_tile_max": [0.9922, 0.9926, 0.9970, 0.9908],
                 "expected_tile_min": [0.0056, 0.0069, 0.0059, 0.0033],
                 "expected_aspect_ratio": [2, 2],
-                "pad_max_tiles": False,
                 "pad_tiles": 1,
             },
             {
@@ -78,17 +65,6 @@ class TestCLIPImageTransform:
                 "expected_tile_max": [1.0, 1.0, 1.0],
                 "expected_tile_min": [0.0, 0.0, 0.0],
                 "expected_aspect_ratio": [3, 1],
-                "pad_max_tiles": False,
-            },
-            {
-                "image_size": (600, 200, 3),
-                "expected_shape": torch.Size([4, 3, 224, 224]),
-                "resize_to_max_canvas": False,
-                "expected_tile_means": [0.4473, 0.4469, 0.3032, 0.0],
-                "expected_tile_max": [1.0, 1.0, 1.0, 0.0],
-                "expected_tile_min": [0.0, 0.0, 0.0, 0.0],
-                "expected_aspect_ratio": [3, 1],
-                "pad_max_tiles": True,
             },
         ],
     )
@@ -103,7 +79,6 @@ def test_clip_image_transform(self, params):
             resample="bilinear",
             dtype=torch.float32,
             resize_to_max_canvas=params["resize_to_max_canvas"],
-            pad_max_tiles=params["pad_max_tiles"],
         )

         image_transform_inference = CLIPImageTransformInference(
@@ -115,7 +90,6 @@ def test_clip_image_transform(self, params):
             resample="bilinear",
             resize_to_max_canvas=params["resize_to_max_canvas"],
             antialias=True,
-            pad_max_tiles=params["pad_max_tiles"],
         )

         # Generate a deterministic image using np.arange for reproducibility
@@ -169,13 +143,7 @@ def test_clip_image_transform(self, params):
         ), f"Expected aspect ratio {params['expected_aspect_ratio']} but got {tuple(output_ar.numpy())}"

         # number of tiles matches the product of the aspect ratio
-        if params["pad_max_tiles"]:
-            # max_num_tiles=4.
-            assert (
-                4 == output_image.shape[0]
-            ), f"Expected 4 tiles but got {output_image.shape[0]}"
-        else:
-            expected_num_tiles = output_ar[0] * output_ar[1]
-            assert (
-                expected_num_tiles == output_image.shape[0]
-            ), f"Expected {expected_num_tiles} tiles but got {output_image.shape[0]}"
+        expected_num_tiles = output_ar[0] * output_ar[1]
+        assert (
+            expected_num_tiles == output_image.shape[0]
+        ), f"Expected {expected_num_tiles} tiles but got {output_image.shape[0]}"
