Commit d675ff4

move Datapoint out of public namespace

1 parent 5dc222b

File tree

12 files changed: +107 -55 lines
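
In downstream code, the change amounts to swapping an import path. A minimal before/after sketch, assuming a torchvision build that still ships the prototype namespace:

# Before this commit, the abstract base class was re-exported publicly:
#   from torchvision.prototype.datapoints import Datapoint
# After it, internal callers reach through the private module instead:
from torchvision.prototype.datapoints._datapoint import Datapoint

# The concrete types (BoundingBox, Image, Label, Mask, ...) stay public:
from torchvision.prototype.datapoints import BoundingBox, Label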

test/test_prototype_transforms_functional.py

Lines changed: 1 addition & 1 deletion

@@ -409,7 +409,7 @@ def test_dispatcher_kernel_signatures_consistency(self, dispatcher_info, feature
     @pytest.mark.parametrize("info", DISPATCHER_INFOS, ids=lambda info: info.id)
     def test_dispatcher_feature_signatures_consistency(self, info):
         try:
-            feature_method = getattr(datapoints.Datapoint, info.id)
+            feature_method = getattr(datapoints._datapoint.Datapoint, info.id)
         except AttributeError:
             pytest.skip("Dispatcher doesn't support arbitrary feature dispatch.")
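
Since the base class is no longer re-exported, the test reaches it through the private module before comparing signatures. A minimal sketch of that lookup, with a hypothetical helper name (real dispatcher ids come from DISPATCHER_INFOS):

import pytest
from torchvision.prototype import datapoints

def feature_method_or_skip(name: str):
    # Look the dispatcher's counterpart up on the now-private base class;
    # dispatchers with no matching Datapoint method are skipped, not failed.
    try:
        return getattr(datapoints._datapoint.Datapoint, name)
    except AttributeError:
        pytest.skip("Dispatcher doesn't support arbitrary feature dispatch.")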

torchvision/prototype/datapoints/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -1,5 +1,5 @@
 from ._bounding_box import BoundingBox, BoundingBoxFormat
-from ._datapoint import Datapoint, FillType, FillTypeJIT, InputType, InputTypeJIT
+from ._datapoint import FillType, FillTypeJIT, InputType, InputTypeJIT
 from ._image import ColorSpace, Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
 from ._label import Label, OneHotLabel
 from ._mask import Mask

torchvision/prototype/datasets/_builtin/caltech.py

Lines changed: 2 additions & 1 deletion

@@ -4,7 +4,8 @@

 import numpy as np
 from torchdata.datapipes.iter import Filter, IterDataPipe, IterKeyZipper, Mapper
-from torchvision.prototype.datapoints import BoundingBox, Datapoint, Label
+from torchvision.prototype.datapoints import BoundingBox, Label
+from torchvision.prototype.datapoints._datapoint import Datapoint
 from torchvision.prototype.datasets.utils import Dataset, EncodedImage, GDriveResource, OnlineResource
 from torchvision.prototype.datasets.utils._internal import (
     hint_sharding,

torchvision/prototype/datasets/_builtin/celeba.py

Lines changed: 2 additions & 1 deletion

@@ -3,7 +3,8 @@
 from typing import Any, BinaryIO, Dict, Iterator, List, Optional, Sequence, Tuple, Union

 from torchdata.datapipes.iter import Filter, IterDataPipe, IterKeyZipper, Mapper, Zipper
-from torchvision.prototype.datapoints import BoundingBox, Datapoint, Label
+from torchvision.prototype.datapoints import BoundingBox, Label
+from torchvision.prototype.datapoints._datapoint import Datapoint
 from torchvision.prototype.datasets.utils import Dataset, EncodedImage, GDriveResource, OnlineResource
 from torchvision.prototype.datasets.utils._internal import (
     getitem,

torchvision/prototype/datasets/_builtin/coco.py

Lines changed: 2 additions & 1 deletion

@@ -14,7 +14,8 @@
     Mapper,
     UnBatcher,
 )
-from torchvision.prototype.datapoints import BoundingBox, Datapoint, Label, Mask
+from torchvision.prototype.datapoints import BoundingBox, Label, Mask
+from torchvision.prototype.datapoints._datapoint import Datapoint
 from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
 from torchvision.prototype.datasets.utils._internal import (
     getitem,

torchvision/prototype/datasets/_builtin/cub200.py

Lines changed: 2 additions & 1 deletion

@@ -14,7 +14,8 @@
     Mapper,
 )
 from torchdata.datapipes.map import IterToMapConverter
-from torchvision.prototype.datapoints import BoundingBox, Datapoint, Label
+from torchvision.prototype.datapoints import BoundingBox, Label
+from torchvision.prototype.datapoints._datapoint import Datapoint
 from torchvision.prototype.datasets.utils import Dataset, EncodedImage, GDriveResource, OnlineResource
 from torchvision.prototype.datasets.utils._internal import (
     getitem,

torchvision/prototype/datasets/_builtin/sbd.py

Lines changed: 1 addition & 1 deletion

@@ -4,7 +4,7 @@

 import numpy as np
 from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
-from torchvision.prototype.datapoints import Datapoint
+from torchvision.prototype.datapoints._datapoint import Datapoint
 from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
 from torchvision.prototype.datasets.utils._internal import (
     getitem,

torchvision/prototype/transforms/functional/_color.py

Lines changed: 44 additions & 22 deletions

@@ -38,9 +38,11 @@ def adjust_brightness_video(video: torch.Tensor, brightness_factor: float) -> torch.Tensor:


 def adjust_brightness(inpt: datapoints.InputTypeJIT, brightness_factor: float) -> datapoints.InputTypeJIT:
-    if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Datapoint)):
+    if isinstance(inpt, torch.Tensor) and (
+        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
+    ):
         return adjust_brightness_image_tensor(inpt, brightness_factor=brightness_factor)
-    elif isinstance(inpt, datapoints.Datapoint):
+    elif isinstance(inpt, datapoints._datapoint.Datapoint):
         return inpt.adjust_brightness(brightness_factor=brightness_factor)
     elif isinstance(inpt, PIL.Image.Image):
         return adjust_brightness_image_pil(inpt, brightness_factor=brightness_factor)

@@ -77,9 +79,11 @@ def adjust_saturation_video(video: torch.Tensor, saturation_factor: float) -> torch.Tensor:


 def adjust_saturation(inpt: datapoints.InputTypeJIT, saturation_factor: float) -> datapoints.InputTypeJIT:
-    if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Datapoint)):
+    if isinstance(inpt, torch.Tensor) and (
+        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
+    ):
         return adjust_saturation_image_tensor(inpt, saturation_factor=saturation_factor)
-    elif isinstance(inpt, datapoints.Datapoint):
+    elif isinstance(inpt, datapoints._datapoint.Datapoint):
         return inpt.adjust_saturation(saturation_factor=saturation_factor)
     elif isinstance(inpt, PIL.Image.Image):
         return adjust_saturation_image_pil(inpt, saturation_factor=saturation_factor)

@@ -116,9 +120,11 @@ def adjust_contrast_video(video: torch.Tensor, contrast_factor: float) -> torch.Tensor:


 def adjust_contrast(inpt: datapoints.InputTypeJIT, contrast_factor: float) -> datapoints.InputTypeJIT:
-    if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Datapoint)):
+    if isinstance(inpt, torch.Tensor) and (
+        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
+    ):
         return adjust_contrast_image_tensor(inpt, contrast_factor=contrast_factor)
-    elif isinstance(inpt, datapoints.Datapoint):
+    elif isinstance(inpt, datapoints._datapoint.Datapoint):
         return inpt.adjust_contrast(contrast_factor=contrast_factor)
     elif isinstance(inpt, PIL.Image.Image):
         return adjust_contrast_image_pil(inpt, contrast_factor=contrast_factor)

@@ -189,9 +195,11 @@ def adjust_sharpness_video(video: torch.Tensor, sharpness_factor: float) -> torch.Tensor:


 def adjust_sharpness(inpt: datapoints.InputTypeJIT, sharpness_factor: float) -> datapoints.InputTypeJIT:
-    if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Datapoint)):
+    if isinstance(inpt, torch.Tensor) and (
+        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
+    ):
         return adjust_sharpness_image_tensor(inpt, sharpness_factor=sharpness_factor)
-    elif isinstance(inpt, datapoints.Datapoint):
+    elif isinstance(inpt, datapoints._datapoint.Datapoint):
         return inpt.adjust_sharpness(sharpness_factor=sharpness_factor)
     elif isinstance(inpt, PIL.Image.Image):
         return adjust_sharpness_image_pil(inpt, sharpness_factor=sharpness_factor)

@@ -301,9 +309,11 @@ def adjust_hue_video(video: torch.Tensor, hue_factor: float) -> torch.Tensor:


 def adjust_hue(inpt: datapoints.InputTypeJIT, hue_factor: float) -> datapoints.InputTypeJIT:
-    if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Datapoint)):
+    if isinstance(inpt, torch.Tensor) and (
+        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
+    ):
         return adjust_hue_image_tensor(inpt, hue_factor=hue_factor)
-    elif isinstance(inpt, datapoints.Datapoint):
+    elif isinstance(inpt, datapoints._datapoint.Datapoint):
         return inpt.adjust_hue(hue_factor=hue_factor)
     elif isinstance(inpt, PIL.Image.Image):
         return adjust_hue_image_pil(inpt, hue_factor=hue_factor)

@@ -341,9 +351,11 @@ def adjust_gamma_video(video: torch.Tensor, gamma: float, gain: float = 1) -> torch.Tensor:


 def adjust_gamma(inpt: datapoints.InputTypeJIT, gamma: float, gain: float = 1) -> datapoints.InputTypeJIT:
-    if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Datapoint)):
+    if isinstance(inpt, torch.Tensor) and (
+        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
+    ):
         return adjust_gamma_image_tensor(inpt, gamma=gamma, gain=gain)
-    elif isinstance(inpt, datapoints.Datapoint):
+    elif isinstance(inpt, datapoints._datapoint.Datapoint):
         return inpt.adjust_gamma(gamma=gamma, gain=gain)
     elif isinstance(inpt, PIL.Image.Image):
         return adjust_gamma_image_pil(inpt, gamma=gamma, gain=gain)

@@ -375,9 +387,11 @@ def posterize_video(video: torch.Tensor, bits: int) -> torch.Tensor:


 def posterize(inpt: datapoints.InputTypeJIT, bits: int) -> datapoints.InputTypeJIT:
-    if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Datapoint)):
+    if isinstance(inpt, torch.Tensor) and (
+        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
+    ):
         return posterize_image_tensor(inpt, bits=bits)
-    elif isinstance(inpt, datapoints.Datapoint):
+    elif isinstance(inpt, datapoints._datapoint.Datapoint):
         return inpt.posterize(bits=bits)
     elif isinstance(inpt, PIL.Image.Image):
         return posterize_image_pil(inpt, bits=bits)

@@ -403,9 +417,11 @@ def solarize_video(video: torch.Tensor, threshold: float) -> torch.Tensor:


 def solarize(inpt: datapoints.InputTypeJIT, threshold: float) -> datapoints.InputTypeJIT:
-    if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Datapoint)):
+    if isinstance(inpt, torch.Tensor) and (
+        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
+    ):
         return solarize_image_tensor(inpt, threshold=threshold)
-    elif isinstance(inpt, datapoints.Datapoint):
+    elif isinstance(inpt, datapoints._datapoint.Datapoint):
         return inpt.solarize(threshold=threshold)
     elif isinstance(inpt, PIL.Image.Image):
         return solarize_image_pil(inpt, threshold=threshold)

@@ -453,9 +469,11 @@ def autocontrast_video(video: torch.Tensor) -> torch.Tensor:


 def autocontrast(inpt: datapoints.InputTypeJIT) -> datapoints.InputTypeJIT:
-    if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Datapoint)):
+    if isinstance(inpt, torch.Tensor) and (
+        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
+    ):
         return autocontrast_image_tensor(inpt)
-    elif isinstance(inpt, datapoints.Datapoint):
+    elif isinstance(inpt, datapoints._datapoint.Datapoint):
         return inpt.autocontrast()
     elif isinstance(inpt, PIL.Image.Image):
         return autocontrast_image_pil(inpt)

@@ -543,9 +561,11 @@ def equalize_video(video: torch.Tensor) -> torch.Tensor:


 def equalize(inpt: datapoints.InputTypeJIT) -> datapoints.InputTypeJIT:
-    if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Datapoint)):
+    if isinstance(inpt, torch.Tensor) and (
+        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
+    ):
         return equalize_image_tensor(inpt)
-    elif isinstance(inpt, datapoints.Datapoint):
+    elif isinstance(inpt, datapoints._datapoint.Datapoint):
         return inpt.equalize()
     elif isinstance(inpt, PIL.Image.Image):
         return equalize_image_pil(inpt)

@@ -574,9 +594,11 @@ def invert_video(video: torch.Tensor) -> torch.Tensor:


 def invert(inpt: datapoints.InputTypeJIT) -> datapoints.InputTypeJIT:
-    if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Datapoint)):
+    if isinstance(inpt, torch.Tensor) and (
+        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
+    ):
         return invert_image_tensor(inpt)
-    elif isinstance(inpt, datapoints.Datapoint):
+    elif isinstance(inpt, datapoints._datapoint.Datapoint):
         return inpt.invert()
     elif isinstance(inpt, PIL.Image.Image):
         return invert_image_pil(inpt)
