Loosen overspecified type hints in functional #7529
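This PR widens the size- and padding-style parameters in `torchvision/transforms/functional.py` from `List[int]` to `Union[int, Sequence[int]]`, matching what the functions already accept at runtime. Below is a minimal sketch of the problem the change addresses; `f_old`, `f_new`, and the mypy behaviour are illustrative assumptions, not part of the diff.

```python
# Illustrative sketch (not from the PR): why `List[int]` is overspecified
# for a size-like parameter. Both functions below are hypothetical.
from typing import List, Sequence, Union


def f_old(size: List[int]) -> None: ...    # old-style annotation


def f_new(size: Union[int, Sequence[int]]) -> None: ...    # widened annotation


f_old([224, 224])   # fine under both hints
f_old((224, 224))   # a checker such as mypy flags this: tuple is not List[int]
f_old(224)          # flagged: int is not List[int]

f_new([224, 224])   # all three forms satisfy the widened hint
f_new((224, 224))
f_new(224)
```

The calls the old hints flagged are ones the transforms already handle, which is presumably why the annotations are loosened here rather than the runtime behaviour changed.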

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 7 additions & 7 deletions torchvision/transforms/functional.py
@@ -2,7 +2,7 @@
import numbers
import warnings
from enum import Enum
-from typing import Any, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union, Sequence

import numpy as np
import torch
@@ -390,7 +390,7 @@ def _compute_resized_output_size(

def resize(
img: Tensor,
-size: List[int],
+size: Union[int,Sequence[int]],
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
max_size: Optional[int] = None,
antialias: Optional[Union[str, bool]] = "warn",
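The hunk above widens `resize`'s `size` parameter. Per `resize`'s documented behaviour, an int requests that the smaller edge be matched (keeping the aspect ratio, optionally capped by `max_size`), while a two-element sequence gives an exact (height, width). A hedged usage sketch; the tensor and shapes below are illustrative:

```python
# Illustrative sketch of the two size forms the widened hint covers.
import torch
from torchvision.transforms.functional import resize

img = torch.rand(3, 300, 400)                     # dummy CHW image tensor

by_edge = resize(img, 256, antialias=True)        # int: smaller edge -> 256, aspect kept
exact = resize(img, (224, 224), antialias=True)   # sequence: exact (height, width)

print(by_edge.shape)   # torch.Size([3, 256, 341]) -- width follows from the aspect ratio
print(exact.shape)     # torch.Size([3, 224, 224])
```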
@@ -492,7 +492,7 @@ def resize(
return F_t.resize(img, size=output_size, interpolation=interpolation.value, antialias=antialias)


-def pad(img: Tensor, padding: List[int], fill: Union[int, float] = 0, padding_mode: str = "constant") -> Tensor:
+def pad(img: Tensor, padding: Union[int, Sequence[int]], fill: Union[int, float] = 0, padding_mode: str = "constant") -> Tensor:
r"""Pad the given image on all sides with the given "pad" value.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
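`pad`'s `padding` parameter gets the same widening. Per its documented behaviour, a single int pads every border, a two-element sequence pads left/right and top/bottom, and a four-element sequence pads left, top, right, bottom. A hedged sketch; the values and shapes are illustrative:

```python
# Illustrative sketch of the padding forms the widened hint covers.
import torch
from torchvision.transforms.functional import pad

img = torch.rand(3, 32, 32)

all_sides = pad(img, 4)             # one int: every border padded by 4
lr_tb = pad(img, (4, 8))            # two values: left/right = 4, top/bottom = 8
per_side = pad(img, (1, 2, 3, 4))   # four values: left, top, right, bottom

print(all_sides.shape)   # torch.Size([3, 40, 40])
print(lr_tb.shape)       # torch.Size([3, 48, 40])
print(per_side.shape)    # torch.Size([3, 38, 36])
```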
@@ -566,7 +566,7 @@ def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
return F_t.crop(img, top, left, height, width)


-def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
+def center_crop(img: Tensor, output_size: Union[int, Sequence[int]]) -> Tensor:
"""Crops the given image at the center.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
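`center_crop`'s `output_size` likewise accepts an int for a square crop; per its docstring, the image is zero-padded first when it is smaller than the requested size along an edge. A hedged sketch:

```python
# Illustrative sketch of center_crop with an int output size.
import torch
from torchvision.transforms.functional import center_crop

img = torch.rand(3, 200, 300)

square = center_crop(img, 128)          # int: 128x128 crop around the center
padded = center_crop(img, (256, 256))   # taller than the image: zero-padded, then cropped

print(square.shape)   # torch.Size([3, 128, 128])
print(padded.shape)   # torch.Size([3, 256, 256])
```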
@@ -613,7 +613,7 @@ def resized_crop(
left: int,
height: int,
width: int,
-size: List[int],
+size: Union[int, Sequence[int]],
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
antialias: Optional[Union[str, bool]] = "warn",
) -> Tensor:
@@ -782,7 +782,7 @@ def vflip(img: Tensor) -> Tensor:
return F_t.vflip(img)


-def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
+def five_crop(img: Tensor, size: Union[int, Sequence[int]]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""Crop the given image into four corners and the central crop.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
@@ -828,7 +828,7 @@ def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Ten


def ten_crop(
-img: Tensor, size: List[int], vertical_flip: bool = False
+img: Tensor, size: Union[int, Sequence[int]], vertical_flip: bool = False
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
"""Generate ten cropped images from the given image.
Crop the given image into four corners and the central crop plus the
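`five_crop` and `ten_crop` receive the same treatment for `size`: an int gives square crops, a sequence gives (height, width). `five_crop` returns the four corner crops plus the center crop, and `ten_crop` adds the flipped counterparts (horizontal by default, vertical when `vertical_flip=True`). A hedged sketch:

```python
# Illustrative sketch of five_crop / ten_crop with int and sequence sizes.
import torch
from torchvision.transforms.functional import five_crop, ten_crop

img = torch.rand(3, 256, 256)

tl, tr, bl, br, center = five_crop(img, 224)              # int: square 224x224 crops
crops = ten_crop(img, (224, 224), vertical_flip=False)    # 5 crops + horizontally flipped copies

print(center.shape)   # torch.Size([3, 224, 224])
print(len(crops))     # 10
```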