     find_supported_resolutions,
     get_canvas_best_fit,
 )
-from torchtune.modules.transforms.vision_utils.pad_dim_to_size import pad_dim_to_size
 from torchtune.modules.transforms.vision_utils.resize_with_pad import resize_with_pad
 from torchtune.modules.transforms.vision_utils.tile_crop import tile_crop
 
@@ -63,7 +62,6 @@ class CLIPImageTransform:
             This will be used to generate possible_resolutions,
             e.g. [(224, 224), (224, 448), (448, 224)] if max_num_tiles = 2 and tile_size = 224.
             Default 4.
-        pad_max_tiles (bool): If True, the image will be padded to have tiles == max_num_tiles. Default False.
         dtype (torch.dtype): Data type of the output image. Default torch.bfloat16.
         resample (str): Resampling method used when resizing images. Supports any enum of
             ``torchvision.transforms.InterpolationMode``, e.g. "nearest", "nearest_exact", "bilinear", "bicubic".
@@ -101,7 +99,6 @@ def __init__(
         possible_resolutions: Optional[List[Tuple[int, int]]] = None,
         tile_size: int = 224,
         max_num_tiles: Optional[int] = 4,
-        pad_max_tiles: bool = False,
         dtype: torch.dtype = torch.bfloat16,
         resample: str = "bilinear",
         resize_to_max_canvas: bool = False,
@@ -142,7 +139,6 @@ def __init__(
         # tile_crop
         self.tile_size = tile_size
         self.tile_crop = tile_crop
-        self.pad_tile_size = max_num_tiles if pad_max_tiles else None
 
     def __call__(
         self, sample: Mapping[str, Any], inference: bool = False
@@ -190,8 +186,6 @@ def __call__(
 
         # Divide the image into equally sized tiles
         image = self.tile_crop(image=image, tile_size=self.tile_size)
-        if self.pad_tile_size:
-            image = pad_dim_to_size(image, size=self.pad_tile_size, dim=0)
 
         aspect_ratio = torch.tensor(best_resolution).reshape(-1) // self.tile_size
 
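For readers unfamiliar with how `possible_resolutions` is derived from `max_num_tiles` and `tile_size` (see the docstring lines in the diff above), a minimal sketch of the enumeration is shown below. The helper name `enumerate_possible_resolutions` is hypothetical and only illustrates the idea; in torchtune this is handled by `find_supported_resolutions`, whose exact logic is not shown in this diff.

```python
from typing import List, Tuple

def enumerate_possible_resolutions(max_num_tiles: int, tile_size: int) -> List[Tuple[int, int]]:
    # Hypothetical sketch: for every tile count n <= max_num_tiles, emit every
    # (rows, cols) grid with rows * cols == n, scaled by tile_size.
    resolutions = []
    for n in range(1, max_num_tiles + 1):
        for rows in range(1, n + 1):
            if n % rows == 0:
                cols = n // rows
                resolutions.append((rows * tile_size, cols * tile_size))
    return resolutions

# Matches the docstring example:
# enumerate_possible_resolutions(2, 224) -> [(224, 224), (224, 448), (448, 224)]
```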
|
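As context for the removed `pad_max_tiles` option, the deleted call `pad_dim_to_size(image, size=self.pad_tile_size, dim=0)` padded the tile dimension of the cropped image tensor up to `max_num_tiles`. A rough sketch of that behavior, assuming a `[n_tiles, channels, tile_size, tile_size]` tensor and zero padding (the padding value used by the removed utility is not shown in this diff), is:

```python
import torch

def pad_dim_to_size(x: torch.Tensor, size: int, dim: int = 0) -> torch.Tensor:
    # Sketch only: append zeros along `dim` until x.shape[dim] == size.
    pad_amount = size - x.shape[dim]
    if pad_amount <= 0:
        return x
    pad_shape = list(x.shape)
    pad_shape[dim] = pad_amount
    return torch.cat([x, x.new_zeros(pad_shape)], dim=dim)

# An image cropped into 3 tiles, padded up to max_num_tiles = 4:
tiles = torch.randn(3, 3, 224, 224)
padded = pad_dim_to_size(tiles, size=4, dim=0)  # shape: [4, 3, 224, 224]
```

With this change, the transform simply returns however many tiles `tile_crop` produces, and `aspect_ratio` continues to be computed from the chosen `best_resolution` as before.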