Status: Closed

## Description
import torch
from torchvision.prototype.transforms import functional as F

# Minimal repro: crop the same image once as a tensor and once as a PIL
# image, then compare the reported spatial sizes of the two results.
# The crop region (top=9, left=9, height=20, width=12) extends past the
# 7x33 input, so out-of-bounds handling is exercised.
source = torch.rand(3, 7, 33)
as_pil = F.to_image_pil(source)

cropped_tensor = F.crop(source, top=9, left=9, height=20, width=12)
cropped_pil = F.crop(as_pil, top=9, left=9, height=20, width=12)

print(F.get_spatial_size(cropped_tensor), F.get_spatial_size(cropped_pil))
Observed output: `[22, 12] [20, 12]` — the tensor path reports a height of 22 while the PIL path reports the requested 20, so the two kernels disagree for the same crop parameters.