From cb175ccccbc07bb8fc8e4dbb516a6a1d58852c8c Mon Sep 17 00:00:00 2001 From: Jirka Date: Sun, 10 Oct 2021 23:41:46 +0200 Subject: [PATCH 01/17] reorder --- .pre-commit-config.yaml | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0024d0243d2..7c4beba20a9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,13 @@ repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: check-docstring-first + - id: check-toml + - id: check-yaml + exclude: packaging/.* + - id: end-of-file-fixer + - repo: https://github.com/omnilib/ufmt rev: v1.3.0 hooks: @@ -6,16 +15,9 @@ repos: additional_dependencies: - black == 21.9b0 - usort == 0.6.4 + - repo: https://gitlab.com/pycqa/flake8 rev: 3.9.2 hooks: - id: flake8 args: [--config=setup.cfg] - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 - hooks: - - id: check-docstring-first - - id: check-toml - - id: check-yaml - exclude: packaging/.* - - id: end-of-file-fixer From 6098b51e1cd3b08b39636b417b593852e09ffc6e Mon Sep 17 00:00:00 2001 From: Jirka Date: Sun, 10 Oct 2021 23:44:07 +0200 Subject: [PATCH 02/17] edit setup --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index a83e555acda..eb0c6b4bb29 100644 --- a/setup.py +++ b/setup.py @@ -473,6 +473,7 @@ def run(self): "scipy": ["scipy"], }, ext_modules=get_extensions(), + python_requires='>=3.6', cmdclass={ "build_ext": BuildExtension.with_options(no_python_abi_suffix=True), "clean": clean, From a8fe5f775bb1ca1c47f7f96f7dcda6c081c28428 Mon Sep 17 00:00:00 2001 From: Jirka Date: Sun, 10 Oct 2021 23:44:37 +0200 Subject: [PATCH 03/17] def pyupgrade --- .pre-commit-config.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7c4beba20a9..4d7c783602c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -8,6 +8,13 @@ repos: exclude: packaging/.* - id: end-of-file-fixer + - repo: https://github.com/asottile/pyupgrade + rev: v2.29.0 + hooks: + - id: pyupgrade + args: [--py36-plus] + name: Upgrade code + - repo: https://github.com/omnilib/ufmt rev: v1.3.0 hooks: From 15fc26c7008e792893b7dba1581e4cf080933f73 Mon Sep 17 00:00:00 2001 From: Jirka Date: Sun, 10 Oct 2021 23:47:16 +0200 Subject: [PATCH 04/17] apply --- .../linux/scripts/run-clang-format.py | 23 ++-- docs/source/conf.py | 1 - gallery/plot_scripted_tensor_transforms.py | 2 +- packaging/wheel/relocate.py | 34 +++-- references/classification/train.py | 16 +-- .../classification/train_quantization.py | 4 +- references/classification/transforms.py | 16 +-- references/classification/utils.py | 12 +- references/detection/coco_utils.py | 8 +- references/detection/engine.py | 4 +- references/detection/group_by_aspect_ratio.py | 4 +- references/detection/train.py | 4 +- references/detection/transforms.py | 10 +- references/detection/utils.py | 12 +- references/segmentation/coco_utils.py | 4 +- references/segmentation/train.py | 6 +- references/segmentation/transforms.py | 12 +- references/segmentation/utils.py | 18 +-- references/similarity/loss.py | 2 +- references/similarity/model.py | 2 +- references/similarity/train.py | 4 +- references/video_classification/train.py | 14 +-- references/video_classification/utils.py | 12 +- setup.py | 41 +++--- test/common_utils.py | 6 +- 
test/datasets_utils.py | 2 +- test/preprocess-bench.py | 2 +- test/test_backbone_utils.py | 4 +- test/test_datasets.py | 4 +- test/test_datasets_utils.py | 6 +- test/test_functional_tensor.py | 16 +-- test/test_image.py | 8 +- test/test_models.py | 12 +- test/test_onnx.py | 12 +- test/test_ops.py | 4 +- test/test_transforms.py | 16 +-- test/test_transforms_tensor.py | 6 +- test/test_transforms_video.py | 2 +- torchvision/__init__.py | 2 +- torchvision/datasets/caltech.py | 14 +-- torchvision/datasets/celeba.py | 4 +- torchvision/datasets/cifar.py | 2 +- torchvision/datasets/cityscapes.py | 12 +- torchvision/datasets/fakedata.py | 6 +- torchvision/datasets/flickr.py | 6 +- torchvision/datasets/folder.py | 6 +- torchvision/datasets/hmdb51.py | 6 +- torchvision/datasets/imagenet.py | 6 +- torchvision/datasets/inaturalist.py | 4 +- torchvision/datasets/kinetics.py | 4 +- torchvision/datasets/lfw.py | 16 ++- torchvision/datasets/lsun.py | 4 +- torchvision/datasets/mnist.py | 20 +-- torchvision/datasets/omniglot.py | 4 +- torchvision/datasets/phototour.py | 16 +-- torchvision/datasets/places365.py | 4 +- torchvision/datasets/sbd.py | 4 +- torchvision/datasets/sbu.py | 2 +- torchvision/datasets/semeion.py | 2 +- torchvision/datasets/stl10.py | 4 +- torchvision/datasets/svhn.py | 2 +- torchvision/datasets/ucf101.py | 8 +- torchvision/datasets/usps.py | 2 +- torchvision/datasets/utils.py | 4 +- torchvision/datasets/video_utils.py | 6 +- torchvision/datasets/vision.py | 10 +- torchvision/datasets/voc.py | 2 +- torchvision/datasets/widerface.py | 8 +- torchvision/io/_video_opt.py | 6 +- torchvision/models/_utils.py | 2 +- torchvision/models/alexnet.py | 2 +- torchvision/models/densenet.py | 8 +- torchvision/models/detection/_utils.py | 6 +- torchvision/models/detection/anchor_utils.py | 4 +- .../models/detection/backbone_utils.py | 2 +- torchvision/models/detection/faster_rcnn.py | 8 +- .../models/detection/generalized_rcnn.py | 2 +- torchvision/models/detection/image_list.py | 2 +- torchvision/models/detection/keypoint_rcnn.py | 6 +- torchvision/models/detection/mask_rcnn.py | 10 +- torchvision/models/detection/roi_heads.py | 2 +- torchvision/models/detection/rpn.py | 4 +- torchvision/models/detection/ssd.py | 2 +- torchvision/models/detection/ssdlite.py | 2 +- torchvision/models/detection/transform.py | 6 +- torchvision/models/efficientnet.py | 4 +- torchvision/models/feature_extraction.py | 4 +- torchvision/models/googlenet.py | 8 +- torchvision/models/inception.py | 16 +-- torchvision/models/mnasnet.py | 8 +- torchvision/models/mobilenetv2.py | 4 +- torchvision/models/mobilenetv3.py | 4 +- torchvision/models/quantization/googlenet.py | 12 +- torchvision/models/quantization/inception.py | 28 ++--- .../models/quantization/mobilenetv2.py | 4 +- .../models/quantization/mobilenetv3.py | 2 +- torchvision/models/quantization/resnet.py | 6 +- .../models/quantization/shufflenetv2.py | 6 +- torchvision/models/resnet.py | 6 +- torchvision/models/segmentation/_utils.py | 2 +- torchvision/models/segmentation/deeplabv3.py | 8 +- torchvision/models/segmentation/fcn.py | 2 +- .../models/segmentation/segmentation.py | 4 +- torchvision/models/shufflenetv2.py | 8 +- torchvision/models/squeezenet.py | 4 +- torchvision/models/vgg.py | 2 +- torchvision/models/video/resnet.py | 16 +-- torchvision/ops/deform_conv.py | 2 +- torchvision/ops/feature_pyramid_network.py | 4 +- torchvision/ops/misc.py | 4 +- torchvision/ops/poolers.py | 4 +- torchvision/ops/ps_roi_align.py | 2 +- torchvision/ops/ps_roi_pool.py | 2 +- 
torchvision/ops/roi_align.py | 2 +- torchvision/ops/roi_pool.py | 2 +- torchvision/ops/stochastic_depth.py | 4 +- .../prototype/datasets/utils/_dataset.py | 2 +- torchvision/transforms/_functional_video.py | 2 +- torchvision/transforms/_transforms_video.py | 18 +-- torchvision/transforms/autoaugment.py | 6 +- torchvision/transforms/functional.py | 58 +++++---- torchvision/transforms/functional_pil.py | 50 ++++---- torchvision/transforms/functional_tensor.py | 38 +++--- torchvision/transforms/transforms.py | 118 +++++++++--------- torchvision/utils.py | 4 +- 125 files changed, 537 insertions(+), 572 deletions(-) diff --git a/.circleci/unittest/linux/scripts/run-clang-format.py b/.circleci/unittest/linux/scripts/run-clang-format.py index 9d6c66b90f6..6c336526382 100755 --- a/.circleci/unittest/linux/scripts/run-clang-format.py +++ b/.circleci/unittest/linux/scripts/run-clang-format.py @@ -34,7 +34,6 @@ import argparse import difflib import fnmatch -import io import multiprocessing import os import signal @@ -87,20 +86,20 @@ def list_files(files, recursive=False, extensions=None, exclude=None): def make_diff(file, original, reformatted): return list( difflib.unified_diff( - original, reformatted, fromfile="{}\t(original)".format(file), tofile="{}\t(reformatted)".format(file), n=3 + original, reformatted, fromfile=f"{file}\t(original)", tofile=f"{file}\t(reformatted)", n=3 ) ) class DiffError(Exception): def __init__(self, message, errs=None): - super(DiffError, self).__init__(message) + super().__init__(message) self.errs = errs or [] class UnexpectedError(Exception): def __init__(self, message, exc=None): - super(UnexpectedError, self).__init__(message) + super().__init__(message) self.formatted_traceback = traceback.format_exc() self.exc = exc @@ -112,14 +111,14 @@ def run_clang_format_diff_wrapper(args, file): except DiffError: raise except Exception as e: - raise UnexpectedError("{}: {}: {}".format(file, e.__class__.__name__, e), e) + raise UnexpectedError(f"{file}: {e.__class__.__name__}: {e}", e) def run_clang_format_diff(args, file): try: - with io.open(file, "r", encoding="utf-8") as f: + with open(file, encoding="utf-8") as f: original = f.readlines() - except IOError as exc: + except OSError as exc: raise DiffError(str(exc)) invocation = [args.clang_format_executable, file] @@ -145,7 +144,7 @@ def run_clang_format_diff(args, file): invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, encoding="utf-8" ) except OSError as exc: - raise DiffError("Command '{}' failed to start: {}".format(subprocess.list2cmdline(invocation), exc)) + raise DiffError(f"Command '{subprocess.list2cmdline(invocation)}' failed to start: {exc}") proc_stdout = proc.stdout proc_stderr = proc.stderr @@ -203,7 +202,7 @@ def print_trouble(prog, message, use_colors): error_text = "error:" if use_colors: error_text = bold_red(error_text) - print("{}: {} {}".format(prog, error_text, message), file=sys.stderr) + print(f"{prog}: {error_text} {message}", file=sys.stderr) def main(): @@ -216,7 +215,7 @@ def main(): ) parser.add_argument( "--extensions", - help="comma separated list of file extensions (default: {})".format(DEFAULT_EXTENSIONS), + help=f"comma separated list of file extensions (default: {DEFAULT_EXTENSIONS})", default=DEFAULT_EXTENSIONS, ) parser.add_argument("-r", "--recursive", action="store_true", help="run recursively over directories") @@ -263,7 +262,7 @@ def main(): colored_stdout = sys.stdout.isatty() colored_stderr = sys.stderr.isatty() - version_invocation = 
[args.clang_format_executable, str("--version")] + version_invocation = [args.clang_format_executable, "--version"] try: subprocess.check_call(version_invocation, stdout=DEVNULL) except subprocess.CalledProcessError as e: @@ -272,7 +271,7 @@ def main(): except OSError as e: print_trouble( parser.prog, - "Command '{}' failed to start: {}".format(subprocess.list2cmdline(version_invocation), e), + f"Command '{subprocess.list2cmdline(version_invocation)}' failed to start: {e}", use_colors=colored_stderr, ) return ExitStatus.TROUBLE diff --git a/docs/source/conf.py b/docs/source/conf.py index 4c2f3faec75..15a8a5cde6b 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- # # PyTorch documentation build configuration file, created by # sphinx-quickstart on Fri Dec 23 13:31:47 2016. diff --git a/gallery/plot_scripted_tensor_transforms.py b/gallery/plot_scripted_tensor_transforms.py index 6f3cc22073e..a9205536821 100644 --- a/gallery/plot_scripted_tensor_transforms.py +++ b/gallery/plot_scripted_tensor_transforms.py @@ -125,7 +125,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: import json -with open(Path('assets') / 'imagenet_class_index.json', 'r') as labels_file: +with open(Path('assets') / 'imagenet_class_index.json') as labels_file: labels = json.load(labels_file) for i, (pred, pred_scripted) in enumerate(zip(res, res_scripted)): diff --git a/packaging/wheel/relocate.py b/packaging/wheel/relocate.py index 3a94d3a58c1..665dab3bf40 100644 --- a/packaging/wheel/relocate.py +++ b/packaging/wheel/relocate.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """Helper script to package wheels and relocate binaries.""" import glob @@ -157,7 +155,7 @@ def relocate_elf_library(patchelf, output_dir, output_library, binary): rename and copy them into the wheel while updating their respective rpaths. 
""" - print("Relocating {0}".format(binary)) + print(f"Relocating {binary}") binary_path = osp.join(output_library, binary) ld_tree = lddtree(binary_path) @@ -173,12 +171,12 @@ def relocate_elf_library(patchelf, output_dir, output_library, binary): print(library) if library_info["path"] is None: - print("Omitting {0}".format(library)) + print(f"Omitting {library}") continue if library in ALLOWLIST: # Omit glibc/gcc/system libraries - print("Omitting {0}".format(library)) + print(f"Omitting {library}") continue parent_dependencies = binary_dependencies.get(parent, []) @@ -201,7 +199,7 @@ def relocate_elf_library(patchelf, output_dir, output_library, binary): if library != binary: library_path = binary_paths[library] new_library_path = patch_new_path(library_path, new_libraries_path) - print("{0} -> {1}".format(library, new_library_path)) + print(f"{library} -> {new_library_path}") shutil.copyfile(library_path, new_library_path) new_names[library] = new_library_path @@ -214,7 +212,7 @@ def relocate_elf_library(patchelf, output_dir, output_library, binary): new_library_name = new_names[library] for dep in library_dependencies: new_dep = osp.basename(new_names[dep]) - print("{0}: {1} -> {2}".format(library, dep, new_dep)) + print(f"{library}: {dep} -> {new_dep}") subprocess.check_output( [patchelf, "--replace-needed", dep, new_dep, new_library_name], cwd=new_libraries_path ) @@ -228,7 +226,7 @@ def relocate_elf_library(patchelf, output_dir, output_library, binary): library_dependencies = binary_dependencies[binary] for dep in library_dependencies: new_dep = osp.basename(new_names[dep]) - print("{0}: {1} -> {2}".format(binary, dep, new_dep)) + print(f"{binary}: {dep} -> {new_dep}") subprocess.check_output([patchelf, "--replace-needed", dep, new_dep, binary], cwd=output_library) print("Update library rpath") @@ -244,7 +242,7 @@ def relocate_dll_library(dumpbin, output_dir, output_library, binary): Given a shared library, find the transitive closure of its dependencies, rename and copy them into the wheel. 
""" - print("Relocating {0}".format(binary)) + print(f"Relocating {binary}") binary_path = osp.join(output_library, binary) library_dlls = find_dll_dependencies(dumpbin, binary_path) @@ -255,18 +253,18 @@ def relocate_dll_library(dumpbin, output_dir, output_library, binary): while binary_queue != []: library, parent = binary_queue.pop(0) if library in WINDOWS_ALLOWLIST or library.startswith("api-ms-win"): - print("Omitting {0}".format(library)) + print(f"Omitting {library}") continue library_path = find_program(library) if library_path is None: - print("{0} not found".format(library)) + print(f"{library} not found") continue if osp.basename(osp.dirname(library_path)) == "system32": continue - print("{0}: {1}".format(library, library_path)) + print(f"{library}: {library_path}") parent_dependencies = binary_dependencies.get(parent, []) parent_dependencies.append(library) binary_dependencies[parent] = parent_dependencies @@ -284,7 +282,7 @@ def relocate_dll_library(dumpbin, output_dir, output_library, binary): if library != binary: library_path = binary_paths[library] new_library_path = osp.join(package_dir, library) - print("{0} -> {1}".format(library, new_library_path)) + print(f"{library} -> {new_library_path}") shutil.copyfile(library_path, new_library_path) @@ -300,16 +298,16 @@ def compress_wheel(output_dir, wheel, wheel_dir, wheel_name): full_file = osp.join(root, this_file) rel_file = osp.relpath(full_file, output_dir) if full_file == record_file: - f.write("{0},,\n".format(rel_file)) + f.write(f"{rel_file},,\n") else: digest, size = rehash(full_file) - f.write("{0},{1},{2}\n".format(rel_file, digest, size)) + f.write(f"{rel_file},{digest},{size}\n") print("Compressing wheel") base_wheel_name = osp.join(wheel_dir, wheel_name) shutil.make_archive(base_wheel_name, "zip", output_dir) os.remove(wheel) - shutil.move("{0}.zip".format(base_wheel_name), wheel) + shutil.move(f"{base_wheel_name}.zip", wheel) shutil.rmtree(output_dir) @@ -338,7 +336,7 @@ def patch_linux(): print("Unzipping wheel...") wheel_file = osp.basename(wheel) wheel_dir = osp.dirname(wheel) - print("{0}".format(wheel_file)) + print(f"{wheel_file}") wheel_name, _ = osp.splitext(wheel_file) unzip_file(wheel, output_dir) @@ -376,7 +374,7 @@ def patch_win(): print("Unzipping wheel...") wheel_file = osp.basename(wheel) wheel_dir = osp.dirname(wheel) - print("{0}".format(wheel_file)) + print(f"{wheel_file}") wheel_name, _ = osp.splitext(wheel_file) unzip_file(wheel, output_dir) diff --git a/references/classification/train.py b/references/classification/train.py index 9b1994bad57..565878a42a7 100644 --- a/references/classification/train.py +++ b/references/classification/train.py @@ -22,7 +22,7 @@ def train_one_epoch( metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}")) metric_logger.add_meter("img/s", utils.SmoothedValue(window_size=10, fmt="{value}")) - header = "Epoch: [{}]".format(epoch) + header = f"Epoch: [{epoch}]" for image, target in metric_logger.log_every(data_loader, print_freq, header): start_time = time.time() image, target = image.to(device), target.to(device) @@ -130,7 +130,7 @@ def load_data(traindir, valdir, args): cache_path = _get_cache_path(traindir) if args.cache_dataset and os.path.exists(cache_path): # Attention, as the transforms are also cached! 
- print("Loading dataset_train from {}".format(cache_path)) + print(f"Loading dataset_train from {cache_path}") dataset, _ = torch.load(cache_path) else: auto_augment_policy = getattr(args, "auto_augment", None) @@ -142,7 +142,7 @@ def load_data(traindir, valdir, args): ), ) if args.cache_dataset: - print("Saving dataset_train to {}".format(cache_path)) + print(f"Saving dataset_train to {cache_path}") utils.mkdir(os.path.dirname(cache_path)) utils.save_on_master((dataset, traindir), cache_path) print("Took", time.time() - st) @@ -151,7 +151,7 @@ def load_data(traindir, valdir, args): cache_path = _get_cache_path(valdir) if args.cache_dataset and os.path.exists(cache_path): # Attention, as the transforms are also cached! - print("Loading dataset_test from {}".format(cache_path)) + print(f"Loading dataset_test from {cache_path}") dataset_test, _ = torch.load(cache_path) else: dataset_test = torchvision.datasets.ImageFolder( @@ -159,7 +159,7 @@ def load_data(traindir, valdir, args): presets.ClassificationPresetEval(crop_size=crop_size, resize_size=resize_size, interpolation=interpolation), ) if args.cache_dataset: - print("Saving dataset_test to {}".format(cache_path)) + print(f"Saving dataset_test to {cache_path}") utils.mkdir(os.path.dirname(cache_path)) utils.save_on_master((dataset_test, valdir), cache_path) @@ -243,7 +243,7 @@ def main(args): alpha=0.9, ) else: - raise RuntimeError("Invalid optimizer {}. Only SGD and RMSprop are supported.".format(args.opt)) + raise RuntimeError(f"Invalid optimizer {args.opt}. Only SGD and RMSprop are supported.") scaler = torch.cuda.amp.GradScaler() if args.amp else None @@ -329,12 +329,12 @@ def main(args): } if model_ema: checkpoint["model_ema"] = model_ema.state_dict() - utils.save_on_master(checkpoint, os.path.join(args.output_dir, "model_{}.pth".format(epoch))) + utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth")) utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth")) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print("Training time {}".format(total_time_str)) + print(f"Training time {total_time_str}") def get_args_parser(add_help=True): diff --git a/references/classification/train_quantization.py b/references/classification/train_quantization.py index 5bf64aea721..8b2b6dc85e6 100644 --- a/references/classification/train_quantization.py +++ b/references/classification/train_quantization.py @@ -141,13 +141,13 @@ def main(args): "epoch": epoch, "args": args, } - utils.save_on_master(checkpoint, os.path.join(args.output_dir, "model_{}.pth".format(epoch))) + utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth")) utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth")) print("Saving models after epoch ", epoch) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print("Training time {}".format(total_time_str)) + print(f"Training time {total_time_str}") def get_args_parser(add_help=True): diff --git a/references/classification/transforms.py b/references/classification/transforms.py index 7788c9e5c3f..400830c1188 100644 --- a/references/classification/transforms.py +++ b/references/classification/transforms.py @@ -39,13 +39,13 @@ def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]: Tensor: Randomly transformed batch. """ if batch.ndim != 4: - raise ValueError("Batch ndim should be 4. 
Got {}".format(batch.ndim)) + raise ValueError(f"Batch ndim should be 4. Got {batch.ndim}") elif target.ndim != 1: - raise ValueError("Target ndim should be 1. Got {}".format(target.ndim)) + raise ValueError(f"Target ndim should be 1. Got {target.ndim}") elif not batch.is_floating_point(): - raise TypeError("Batch dtype should be a float tensor. Got {}.".format(batch.dtype)) + raise TypeError(f"Batch dtype should be a float tensor. Got {batch.dtype}.") elif target.dtype != torch.int64: - raise TypeError("Target dtype should be torch.int64. Got {}".format(target.dtype)) + raise TypeError(f"Target dtype should be torch.int64. Got {target.dtype}") if not self.inplace: batch = batch.clone() @@ -115,13 +115,13 @@ def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]: Tensor: Randomly transformed batch. """ if batch.ndim != 4: - raise ValueError("Batch ndim should be 4. Got {}".format(batch.ndim)) + raise ValueError(f"Batch ndim should be 4. Got {batch.ndim}") elif target.ndim != 1: - raise ValueError("Target ndim should be 1. Got {}".format(target.ndim)) + raise ValueError(f"Target ndim should be 1. Got {target.ndim}") elif not batch.is_floating_point(): - raise TypeError("Batch dtype should be a float tensor. Got {}.".format(batch.dtype)) + raise TypeError(f"Batch dtype should be a float tensor. Got {batch.dtype}.") elif target.dtype != torch.int64: - raise TypeError("Target dtype should be torch.int64. Got {}".format(target.dtype)) + raise TypeError(f"Target dtype should be torch.int64. Got {target.dtype}") if not self.inplace: batch = batch.clone() diff --git a/references/classification/utils.py b/references/classification/utils.py index c186a60fc1e..8a40ad861ac 100644 --- a/references/classification/utils.py +++ b/references/classification/utils.py @@ -10,7 +10,7 @@ import torch.distributed as dist -class SmoothedValue(object): +class SmoothedValue: """Track a series of values and provide access to smoothed values over a window or the global series average. 
""" @@ -65,7 +65,7 @@ def __str__(self): ) -class MetricLogger(object): +class MetricLogger: def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter @@ -82,12 +82,12 @@ def __getattr__(self, attr): return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr)) + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'") def __str__(self): loss_str = [] for name, meter in self.meters.items(): - loss_str.append("{}: {}".format(name, str(meter))) + loss_str.append(f"{name}: {str(meter)}") return self.delimiter.join(loss_str) def synchronize_between_processes(self): @@ -152,7 +152,7 @@ def log_every(self, iterable, print_freq, header=None): end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print("{} Total time: {}".format(header, total_time_str)) + print(f"{header} Total time: {total_time_str}") class ExponentialMovingAverage(torch.optim.swa_utils.AveragedModel): @@ -270,7 +270,7 @@ def init_distributed_mode(args): torch.cuda.set_device(args.gpu) args.dist_backend = "nccl" - print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True) + print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True) torch.distributed.init_process_group( backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank ) diff --git a/references/detection/coco_utils.py b/references/detection/coco_utils.py index a0c29600817..a6566028659 100644 --- a/references/detection/coco_utils.py +++ b/references/detection/coco_utils.py @@ -9,7 +9,7 @@ from pycocotools.coco import COCO -class FilterAndRemapCocoCategories(object): +class FilterAndRemapCocoCategories: def __init__(self, categories, remap=True): self.categories = categories self.remap = remap @@ -44,7 +44,7 @@ def convert_coco_poly_to_mask(segmentations, height, width): return masks -class ConvertCocoPolysToMask(object): +class ConvertCocoPolysToMask: def __call__(self, image, target): w, h = image.size @@ -205,11 +205,11 @@ def get_coco_api_from_dataset(dataset): class CocoDetection(torchvision.datasets.CocoDetection): def __init__(self, img_folder, ann_file, transforms): - super(CocoDetection, self).__init__(img_folder, ann_file) + super().__init__(img_folder, ann_file) self._transforms = transforms def __getitem__(self, idx): - img, target = super(CocoDetection, self).__getitem__(idx) + img, target = super().__getitem__(idx) image_id = self.ids[idx] target = dict(image_id=image_id, annotations=target) if self._transforms is not None: diff --git a/references/detection/engine.py b/references/detection/engine.py index 2ca7df808ef..eb6bcf6b476 100644 --- a/references/detection/engine.py +++ b/references/detection/engine.py @@ -13,7 +13,7 @@ def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq): model.train() metric_logger = utils.MetricLogger(delimiter=" ") metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.6f}")) - header = "Epoch: [{}]".format(epoch) + header = f"Epoch: [{epoch}]" lr_scheduler = None if epoch == 0: @@ -39,7 +39,7 @@ def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq): loss_value = losses_reduced.item() if not math.isfinite(loss_value): - print("Loss is {}, stopping training".format(loss_value)) + print(f"Loss is {loss_value}, stopping training") 
print(loss_dict_reduced) sys.exit(1) diff --git a/references/detection/group_by_aspect_ratio.py b/references/detection/group_by_aspect_ratio.py index 8d680f1b18b..17f17ee495e 100644 --- a/references/detection/group_by_aspect_ratio.py +++ b/references/detection/group_by_aspect_ratio.py @@ -193,6 +193,6 @@ def create_aspect_ratio_groups(dataset, k=0): # count number of elements per group counts = np.unique(groups, return_counts=True)[1] fbins = [0] + bins + [np.inf] - print("Using {} as bins for aspect ratio quantization".format(fbins)) - print("Count of instances per bin: {}".format(counts)) + print(f"Using {fbins} as bins for aspect ratio quantization") + print(f"Count of instances per bin: {counts}") return groups diff --git a/references/detection/train.py b/references/detection/train.py index e86762342cc..f228a639208 100644 --- a/references/detection/train.py +++ b/references/detection/train.py @@ -223,7 +223,7 @@ def main(args): "args": args, "epoch": epoch, } - utils.save_on_master(checkpoint, os.path.join(args.output_dir, "model_{}.pth".format(epoch))) + utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth")) utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth")) # evaluate after every epoch @@ -231,7 +231,7 @@ def main(args): total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print("Training time {}".format(total_time_str)) + print(f"Training time {total_time_str}") if __name__ == "__main__": diff --git a/references/detection/transforms.py b/references/detection/transforms.py index 787bb75a5c5..4ab5a652539 100644 --- a/references/detection/transforms.py +++ b/references/detection/transforms.py @@ -17,7 +17,7 @@ def _flip_coco_person_keypoints(kps, width): return flipped_data -class Compose(object): +class Compose: def __init__(self, transforms): self.transforms = transforms @@ -103,7 +103,7 @@ def forward( if isinstance(image, torch.Tensor): if image.ndimension() not in {2, 3}: - raise ValueError("image should be 2/3 dimensional. Got {} dimensions.".format(image.ndimension())) + raise ValueError(f"image should be 2/3 dimensional. Got {image.ndimension()} dimensions.") elif image.ndimension() == 2: image = image.unsqueeze(0) @@ -171,7 +171,7 @@ def __init__( self.fill = fill self.side_range = side_range if side_range[0] < 1.0 or side_range[0] > side_range[1]: - raise ValueError("Invalid canvas side range provided {}.".format(side_range)) + raise ValueError(f"Invalid canvas side range provided {side_range}.") self.p = p @torch.jit.unused @@ -185,7 +185,7 @@ def forward( ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: if isinstance(image, torch.Tensor): if image.ndimension() not in {2, 3}: - raise ValueError("image should be 2/3 dimensional. Got {} dimensions.".format(image.ndimension())) + raise ValueError(f"image should be 2/3 dimensional. Got {image.ndimension()} dimensions.") elif image.ndimension() == 2: image = image.unsqueeze(0) @@ -244,7 +244,7 @@ def forward( ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: if isinstance(image, torch.Tensor): if image.ndimension() not in {2, 3}: - raise ValueError("image should be 2/3 dimensional. Got {} dimensions.".format(image.ndimension())) + raise ValueError(f"image should be 2/3 dimensional. 
Got {image.ndimension()} dimensions.") elif image.ndimension() == 2: image = image.unsqueeze(0) diff --git a/references/detection/utils.py b/references/detection/utils.py index c708ca05413..20280348fdd 100644 --- a/references/detection/utils.py +++ b/references/detection/utils.py @@ -8,7 +8,7 @@ import torch.distributed as dist -class SmoothedValue(object): +class SmoothedValue: """Track a series of values and provide access to smoothed values over a window or the global series average. """ @@ -110,7 +110,7 @@ def reduce_dict(input_dict, average=True): return reduced_dict -class MetricLogger(object): +class MetricLogger: def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter @@ -127,12 +127,12 @@ def __getattr__(self, attr): return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr)) + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'") def __str__(self): loss_str = [] for name, meter in self.meters.items(): - loss_str.append("{}: {}".format(name, str(meter))) + loss_str.append(f"{name}: {str(meter)}") return self.delimiter.join(loss_str) def synchronize_between_processes(self): @@ -197,7 +197,7 @@ def log_every(self, iterable, print_freq, header=None): end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print("{} Total time: {} ({:.4f} s / it)".format(header, total_time_str, total_time / len(iterable))) + print(f"{header} Total time: {total_time_str} ({total_time / len(iterable):.4f} s / it)") def collate_fn(batch): @@ -274,7 +274,7 @@ def init_distributed_mode(args): torch.cuda.set_device(args.gpu) args.dist_backend = "nccl" - print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True) + print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True) torch.distributed.init_process_group( backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank ) diff --git a/references/segmentation/coco_utils.py b/references/segmentation/coco_utils.py index 83091c75d95..4d37187f7ec 100644 --- a/references/segmentation/coco_utils.py +++ b/references/segmentation/coco_utils.py @@ -9,7 +9,7 @@ from transforms import Compose -class FilterAndRemapCocoCategories(object): +class FilterAndRemapCocoCategories: def __init__(self, categories, remap=True): self.categories = categories self.remap = remap @@ -41,7 +41,7 @@ def convert_coco_poly_to_mask(segmentations, height, width): return masks -class ConvertCocoPolysToMask(object): +class ConvertCocoPolysToMask: def __call__(self, image, anno): w, h = image.size segmentations = [obj["segmentation"] for obj in anno] diff --git a/references/segmentation/train.py b/references/segmentation/train.py index 3a41f86ba87..3facddb828f 100644 --- a/references/segmentation/train.py +++ b/references/segmentation/train.py @@ -66,7 +66,7 @@ def train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, devi model.train() metric_logger = utils.MetricLogger(delimiter=" ") metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}")) - header = "Epoch: [{}]".format(epoch) + header = f"Epoch: [{epoch}]" for image, target in metric_logger.log_every(data_loader, print_freq, header): image, target = image.to(device), target.to(device) output = model(image) @@ -188,12 +188,12 @@ def main(args): "epoch": epoch, "args": 
args, } - utils.save_on_master(checkpoint, os.path.join(args.output_dir, "model_{}.pth".format(epoch))) + utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth")) utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth")) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print("Training time {}".format(total_time_str)) + print(f"Training time {total_time_str}") def get_args_parser(add_help=True): diff --git a/references/segmentation/transforms.py b/references/segmentation/transforms.py index cf4846a1c27..518048db2fa 100644 --- a/references/segmentation/transforms.py +++ b/references/segmentation/transforms.py @@ -16,7 +16,7 @@ def pad_if_smaller(img, size, fill=0): return img -class Compose(object): +class Compose: def __init__(self, transforms): self.transforms = transforms @@ -26,7 +26,7 @@ def __call__(self, image, target): return image, target -class RandomResize(object): +class RandomResize: def __init__(self, min_size, max_size=None): self.min_size = min_size if max_size is None: @@ -40,7 +40,7 @@ def __call__(self, image, target): return image, target -class RandomHorizontalFlip(object): +class RandomHorizontalFlip: def __init__(self, flip_prob): self.flip_prob = flip_prob @@ -51,7 +51,7 @@ def __call__(self, image, target): return image, target -class RandomCrop(object): +class RandomCrop: def __init__(self, size): self.size = size @@ -64,7 +64,7 @@ def __call__(self, image, target): return image, target -class CenterCrop(object): +class CenterCrop: def __init__(self, size): self.size = size @@ -90,7 +90,7 @@ def __call__(self, image, target): return image, target -class Normalize(object): +class Normalize: def __init__(self, mean, std): self.mean = mean self.std = std diff --git a/references/segmentation/utils.py b/references/segmentation/utils.py index 2bb5451289a..45e6f025393 100644 --- a/references/segmentation/utils.py +++ b/references/segmentation/utils.py @@ -8,7 +8,7 @@ import torch.distributed as dist -class SmoothedValue(object): +class SmoothedValue: """Track a series of values and provide access to smoothed values over a window or the global series average. 
""" @@ -67,7 +67,7 @@ def __str__(self): ) -class ConfusionMatrix(object): +class ConfusionMatrix: def __init__(self, num_classes): self.num_classes = num_classes self.mat = None @@ -103,13 +103,13 @@ def __str__(self): acc_global, acc, iu = self.compute() return ("global correct: {:.1f}\n" "average row correct: {}\n" "IoU: {}\n" "mean IoU: {:.1f}").format( acc_global.item() * 100, - ["{:.1f}".format(i) for i in (acc * 100).tolist()], - ["{:.1f}".format(i) for i in (iu * 100).tolist()], + [f"{i:.1f}" for i in (acc * 100).tolist()], + [f"{i:.1f}" for i in (iu * 100).tolist()], iu.mean().item() * 100, ) -class MetricLogger(object): +class MetricLogger: def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter @@ -126,12 +126,12 @@ def __getattr__(self, attr): return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr)) + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'") def __str__(self): loss_str = [] for name, meter in self.meters.items(): - loss_str.append("{}: {}".format(name, str(meter))) + loss_str.append(f"{name}: {str(meter)}") return self.delimiter.join(loss_str) def synchronize_between_processes(self): @@ -196,7 +196,7 @@ def log_every(self, iterable, print_freq, header=None): end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print("{} Total time: {}".format(header, total_time_str)) + print(f"{header} Total time: {total_time_str}") def cat_list(images, fill_value=0): @@ -287,7 +287,7 @@ def init_distributed_mode(args): torch.cuda.set_device(args.gpu) args.dist_backend = "nccl" - print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True) + print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True) torch.distributed.init_process_group( backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank ) diff --git a/references/similarity/loss.py b/references/similarity/loss.py index 237ad8e9e11..971810a0663 100644 --- a/references/similarity/loss.py +++ b/references/similarity/loss.py @@ -8,7 +8,7 @@ class TripletMarginLoss(nn.Module): def __init__(self, margin=1.0, p=2.0, mining="batch_all"): - super(TripletMarginLoss, self).__init__() + super().__init__() self.margin = margin self.p = p self.mining = mining diff --git a/references/similarity/model.py b/references/similarity/model.py index 3b39c0ec0dc..f235ae11116 100644 --- a/references/similarity/model.py +++ b/references/similarity/model.py @@ -4,7 +4,7 @@ class EmbeddingNet(nn.Module): def __init__(self, backbone=None): - super(EmbeddingNet, self).__init__() + super().__init__() if backbone is None: backbone = models.resnet50(num_classes=128) diff --git a/references/similarity/train.py b/references/similarity/train.py index c8f041acdad..713cbadf280 100644 --- a/references/similarity/train.py +++ b/references/similarity/train.py @@ -31,7 +31,7 @@ def train_epoch(model, optimizer, criterion, data_loader, device, epoch, print_f i += 1 avg_loss = running_loss / print_freq avg_trip = 100.0 * running_frac_pos_triplets / print_freq - print("[{:d}, {:d}] | loss: {:.4f} | % avg hard triplets: {:.2f}%".format(epoch, i, avg_loss, avg_trip)) + print(f"[{epoch:d}, {i:d}] | loss: {avg_loss:.4f} | % avg hard triplets: {avg_trip:.2f}%") running_loss = 0 running_frac_pos_triplets = 0 @@ -77,7 +77,7 @@ def 
evaluate(model, loader, device): threshold, accuracy = find_best_threshold(dists, targets, device) - print("accuracy: {:.3f}%, threshold: {:.2f}".format(accuracy, threshold)) + print(f"accuracy: {accuracy:.3f}%, threshold: {threshold:.2f}") def save(model, epoch, save_dir, file_name): diff --git a/references/video_classification/train.py b/references/video_classification/train.py index f944cff7794..432ff590b0e 100644 --- a/references/video_classification/train.py +++ b/references/video_classification/train.py @@ -24,7 +24,7 @@ def train_one_epoch(model, criterion, optimizer, lr_scheduler, data_loader, devi metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}")) metric_logger.add_meter("clips/s", utils.SmoothedValue(window_size=10, fmt="{value:.3f}")) - header = "Epoch: [{}]".format(epoch) + header = f"Epoch: [{epoch}]" for video, target in metric_logger.log_every(data_loader, print_freq, header): start_time = time.time() video, target = video.to(device), target.to(device) @@ -122,7 +122,7 @@ def main(args): transform_train = presets.VideoClassificationPresetTrain((128, 171), (112, 112)) if args.cache_dataset and os.path.exists(cache_path): - print("Loading dataset_train from {}".format(cache_path)) + print(f"Loading dataset_train from {cache_path}") dataset, _ = torch.load(cache_path) dataset.transform = transform_train else: @@ -140,7 +140,7 @@ def main(args): ), ) if args.cache_dataset: - print("Saving dataset_train to {}".format(cache_path)) + print(f"Saving dataset_train to {cache_path}") utils.mkdir(os.path.dirname(cache_path)) utils.save_on_master((dataset, traindir), cache_path) @@ -152,7 +152,7 @@ def main(args): transform_test = presets.VideoClassificationPresetEval((128, 171), (112, 112)) if args.cache_dataset and os.path.exists(cache_path): - print("Loading dataset_test from {}".format(cache_path)) + print(f"Loading dataset_test from {cache_path}") dataset_test, _ = torch.load(cache_path) dataset_test.transform = transform_test else: @@ -170,7 +170,7 @@ def main(args): ), ) if args.cache_dataset: - print("Saving dataset_test to {}".format(cache_path)) + print(f"Saving dataset_test to {cache_path}") utils.mkdir(os.path.dirname(cache_path)) utils.save_on_master((dataset_test, valdir), cache_path) @@ -275,12 +275,12 @@ def main(args): "epoch": epoch, "args": args, } - utils.save_on_master(checkpoint, os.path.join(args.output_dir, "model_{}.pth".format(epoch))) + utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth")) utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth")) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print("Training time {}".format(total_time_str)) + print(f"Training time {total_time_str}") def parse_args(): diff --git a/references/video_classification/utils.py b/references/video_classification/utils.py index 956c4f85239..3c024434542 100644 --- a/references/video_classification/utils.py +++ b/references/video_classification/utils.py @@ -8,7 +8,7 @@ import torch.distributed as dist -class SmoothedValue(object): +class SmoothedValue: """Track a series of values and provide access to smoothed values over a window or the global series average. 
""" @@ -67,7 +67,7 @@ def __str__(self): ) -class MetricLogger(object): +class MetricLogger: def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter @@ -84,12 +84,12 @@ def __getattr__(self, attr): return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr)) + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'") def __str__(self): loss_str = [] for name, meter in self.meters.items(): - loss_str.append("{}: {}".format(name, str(meter))) + loss_str.append(f"{name}: {str(meter)}") return self.delimiter.join(loss_str) def synchronize_between_processes(self): @@ -154,7 +154,7 @@ def log_every(self, iterable, print_freq, header=None): end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print("{} Total time: {}".format(header, total_time_str)) + print(f"{header} Total time: {total_time_str}") def accuracy(output, target, topk=(1,)): @@ -246,7 +246,7 @@ def init_distributed_mode(args): torch.cuda.set_device(args.gpu) args.dist_backend = "nccl" - print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True) + print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True) torch.distributed.init_process_group( backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank ) diff --git a/setup.py b/setup.py index eb0c6b4bb29..1ee1e61ac17 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,6 @@ import distutils.command.clean import distutils.spawn import glob -import io import os import shutil import subprocess @@ -14,7 +13,7 @@ def read(*names, **kwargs): - with io.open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp: + with open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp: return fp.read() @@ -28,7 +27,7 @@ def get_dist(pkgname): cwd = os.path.dirname(os.path.abspath(__file__)) version_txt = os.path.join(cwd, "version.txt") -with open(version_txt, "r") as f: +with open(version_txt) as f: version = f.readline().strip() sha = "Unknown" package_name = "torchvision" @@ -47,8 +46,8 @@ def get_dist(pkgname): def write_version_file(): version_path = os.path.join(cwd, "torchvision", "version.py") with open(version_path, "w") as f: - f.write("__version__ = '{}'\n".format(version)) - f.write("git_version = {}\n".format(repr(sha))) + f.write(f"__version__ = '{version}'\n") + f.write(f"git_version = {repr(sha)}\n") f.write("from torchvision.extension import _check_cuda_version\n") f.write("if _check_cuda_version() > 0:\n") f.write(" cuda = _check_cuda_version()\n") @@ -78,7 +77,7 @@ def find_library(name, vision_include): conda_installed = False lib_folder = None include_folder = None - library_header = "{0}.h".format(name) + library_header = f"{name}.h" # Lookup in TORCHVISION_INCLUDE or in the package file package_path = [os.path.join(this_dir, "torchvision")] @@ -89,7 +88,7 @@ def find_library(name, vision_include): break if not library_found: - print("Running build on conda-build: {0}".format(is_conda_build)) + print(f"Running build on conda-build: {is_conda_build}") if is_conda_build: # Add conda headers/libraries if os.name == "nt": @@ -103,7 +102,7 @@ def find_library(name, vision_include): # Check if using Anaconda to produce wheels conda = distutils.spawn.find_executable("conda") 
is_conda = conda is not None - print("Running build on conda: {0}".format(is_conda)) + print(f"Running build on conda: {is_conda}") if is_conda: python_executable = sys.executable py_folder = os.path.dirname(python_executable) @@ -119,8 +118,8 @@ def find_library(name, vision_include): if not library_found: if sys.platform == "linux": - library_found = os.path.exists("/usr/include/{0}".format(library_header)) - library_found = library_found or os.path.exists("/usr/local/include/{0}".format(library_header)) + library_found = os.path.exists(f"/usr/include/{library_header}") + library_found = library_found or os.path.exists(f"/usr/local/include/{library_header}") return library_found, conda_installed, include_folder, lib_folder @@ -259,13 +258,13 @@ def get_extensions(): libpng = distutils.spawn.find_executable("libpng-config") pngfix = distutils.spawn.find_executable("pngfix") png_found = libpng is not None or pngfix is not None - print("PNG found: {0}".format(png_found)) + print(f"PNG found: {png_found}") if png_found: if libpng is not None: # Linux / Mac png_version = subprocess.run([libpng, "--version"], stdout=subprocess.PIPE) png_version = png_version.stdout.strip().decode("utf-8") - print("libpng version: {0}".format(png_version)) + print(f"libpng version: {png_version}") png_version = parse_version(png_version) if png_version >= parse_version("1.6.0"): print("Building torchvision with PNG image support") @@ -276,7 +275,7 @@ def get_extensions(): png_include = subprocess.run([libpng, "--I_opts"], stdout=subprocess.PIPE) png_include = png_include.stdout.strip().decode("utf-8") _, png_include = png_include.split("-I") - print("libpng include path: {0}".format(png_include)) + print(f"libpng include path: {png_include}") image_include += [png_include] image_link_flags.append("png") else: @@ -293,7 +292,7 @@ def get_extensions(): # Locating libjpeg (jpeg_found, jpeg_conda, jpeg_include, jpeg_lib) = find_library("jpeglib", vision_include) - print("JPEG found: {0}".format(jpeg_found)) + print(f"JPEG found: {jpeg_found}") image_macros += [("PNG_FOUND", str(int(png_found)))] image_macros += [("JPEG_FOUND", str(int(jpeg_found)))] if jpeg_found: @@ -311,7 +310,7 @@ def get_extensions(): and os.path.exists(os.path.join(CUDA_HOME, "include", "nvjpeg.h")) ) - print("NVJPEG found: {0}".format(nvjpeg_found)) + print(f"NVJPEG found: {nvjpeg_found}") image_macros += [("NVJPEG_FOUND", str(int(nvjpeg_found)))] if nvjpeg_found: print("Building torchvision with NVJPEG image support") @@ -353,7 +352,7 @@ def get_extensions(): print("Error fetching ffmpeg version, ignoring ffmpeg.") has_ffmpeg = False - print("FFmpeg found: {}".format(has_ffmpeg)) + print(f"FFmpeg found: {has_ffmpeg}") if has_ffmpeg: ffmpeg_libraries = {"libavcodec", "libavformat", "libavutil", "libswresample", "libswscale"} @@ -387,8 +386,8 @@ def get_extensions(): has_ffmpeg = False if has_ffmpeg: - print("ffmpeg include path: {}".format(ffmpeg_include_dir)) - print("ffmpeg library_dir: {}".format(ffmpeg_library_dir)) + print(f"ffmpeg include path: {ffmpeg_include_dir}") + print(f"ffmpeg library_dir: {ffmpeg_library_dir}") # TorchVision base decoder + video reader video_reader_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video_reader") @@ -433,7 +432,7 @@ def get_extensions(): class clean(distutils.command.clean.clean): def run(self): - with open(".gitignore", "r") as f: + with open(".gitignore") as f: ignores = f.read() for wildcard in filter(None, ignores.split("\n")): for filename in glob.glob(wildcard): @@ -447,7 
+446,7 @@ def run(self): if __name__ == "__main__": - print("Building wheel {}-{}".format(package_name, version)) + print(f"Building wheel {package_name}-{version}") write_version_file() @@ -473,7 +472,7 @@ def run(self): "scipy": ["scipy"], }, ext_modules=get_extensions(), - python_requires='>=3.6', + python_requires=">=3.6", cmdclass={ "build_ext": BuildExtension.with_options(no_python_abi_suffix=True), "clean": clean, diff --git a/test/common_utils.py b/test/common_utils.py index f782613971c..5a853771301 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -37,7 +37,7 @@ def set_rng_seed(seed): random.seed(seed) -class MapNestedTensorObjectImpl(object): +class MapNestedTensorObjectImpl: def __init__(self, tensor_map_fn): self.tensor_map_fn = tensor_map_fn @@ -152,7 +152,7 @@ def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): else: f = fps[i] data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) - name = os.path.join(tmpdir, "{}.mp4".format(i)) + name = os.path.join(tmpdir, f"{i}.mp4") names.append(name) io.write_video(name, data, fps=f) @@ -165,7 +165,7 @@ def _assert_equal_tensor_to_pil(tensor, pil_image, msg=None): np_pil_image = np_pil_image[:, :, None] pil_tensor = torch.as_tensor(np_pil_image.transpose((2, 0, 1))) if msg is None: - msg = "tensor:\n{} \ndid not equal PIL tensor:\n{}".format(tensor, pil_tensor) + msg = f"tensor:\n{tensor} \ndid not equal PIL tensor:\n{pil_tensor}" assert_equal(tensor.cpu(), pil_tensor, msg=msg) diff --git a/test/datasets_utils.py b/test/datasets_utils.py index 646babdda1e..3fb89a6d3da 100644 --- a/test/datasets_utils.py +++ b/test/datasets_utils.py @@ -133,7 +133,7 @@ def test_foo(self, config): def maybe_remove_duplicates(configs): try: - return [dict(config_) for config_ in set(tuple(sorted(config.items())) for config in configs)] + return [dict(config_) for config_ in {tuple(sorted(config.items())) for config in configs}] except TypeError: # A TypeError will be raised if a value of any config is not hashable, e.g. a list. In that case duplicate # removal would be a lot more elaborate and we simply bail out. 
diff --git a/test/preprocess-bench.py b/test/preprocess-bench.py index df557b29197..2eda252c848 100644 --- a/test/preprocess-bench.py +++ b/test/preprocess-bench.py @@ -26,7 +26,7 @@ if args.accimage: torchvision.set_image_backend("accimage") - print("Using {}".format(torchvision.get_image_backend())) + print(f"Using {torchvision.get_image_backend()}") # Data loading code transform = transforms.Compose( diff --git a/test/test_backbone_utils.py b/test/test_backbone_utils.py index 63687e42f57..9b46bdd5288 100644 --- a/test/test_backbone_utils.py +++ b/test/test_backbone_utils.py @@ -144,7 +144,7 @@ def test_forward_backward(self, model_name): model, train_return_nodes=train_return_nodes, eval_return_nodes=eval_return_nodes ) out = model(self.inp) - sum([o.mean() for o in out.values()]).backward() + sum(o.mean() for o in out.values()).backward() def test_feature_extraction_methods_equivalence(self): model = models.resnet18(**self.model_defaults).eval() @@ -176,7 +176,7 @@ def test_jit_forward_backward(self, model_name): ) model = torch.jit.script(model) fgn_out = model(self.inp) - sum([o.mean() for o in fgn_out.values()]).backward() + sum(o.mean() for o in fgn_out.values()).backward() def test_train_eval(self): class TestModel(torch.nn.Module): diff --git a/test/test_datasets.py b/test/test_datasets.py index d2dc4ea6958..575e5ccb811 100644 --- a/test/test_datasets.py +++ b/test/test_datasets.py @@ -654,7 +654,7 @@ def _create_image_set_files(self, root, name, is_test_set): shutil.copytree(src, root / "Segmentation") num_images = max(itertools.chain(*idcs.values())) + 1 - num_images_per_image_set = dict([(image_set, len(idcs_)) for image_set, idcs_ in idcs.items()]) + num_images_per_image_set = {image_set: len(idcs_) for image_set, idcs_ in idcs.items()} return num_images, num_images_per_image_set def _create_image_set_file(self, root, image_set, idcs): @@ -1174,7 +1174,7 @@ def _create_split_files(self, root): self._create_split_file(root, split, idcs) num_images = max(itertools.chain(*splits.values())) + 1 - num_images_per_split = dict([(split, len(idcs)) for split, idcs in splits.items()]) + num_images_per_split = {split: len(idcs) for split, idcs in splits.items()} return num_images, num_images_per_split def _create_split_file(self, root, name, idcs): diff --git a/test/test_datasets_utils.py b/test/test_datasets_utils.py index bece7dd1e9d..c3e63fb7f5e 100644 --- a/test/test_datasets_utils.py +++ b/test/test_datasets_utils.py @@ -129,7 +129,7 @@ def create_compressed(root, content="this is the content"): assert os.path.exists(file) - with open(file, "r") as fh: + with open(file) as fh: assert fh.read() == content def test_decompress_no_compression(self): @@ -179,7 +179,7 @@ def create_archive(root, content="this is the content"): assert os.path.exists(file) - with open(file, "r") as fh: + with open(file) as fh: assert fh.read() == content @pytest.mark.parametrize( @@ -205,7 +205,7 @@ def create_archive(root, extension, mode, content="this is the content"): assert os.path.exists(file) - with open(file, "r") as fh: + with open(file) as fh: assert fh.read() == content def test_verify_str_arg(self): diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py index 7e64b9752bf..48f2a29e767 100644 --- a/test/test_functional_tensor.py +++ b/test/test_functional_tensor.py @@ -177,11 +177,11 @@ def test_identity_map(self, device, height, width, dt): # 1) identity map out_tensor = F.affine(tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST) - 
assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])) + assert_equal(tensor, out_tensor, msg=f"{out_tensor[0, :5, :5]} vs {tensor[0, :5, :5]}") out_tensor = self.scripted_affine( tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST ) - assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])) + assert_equal(tensor, out_tensor, msg=f"{out_tensor[0, :5, :5]} vs {tensor[0, :5, :5]}") @pytest.mark.parametrize("device", cpu_and_gpu()) @pytest.mark.parametrize("height, width", [(26, 26)]) @@ -936,7 +936,7 @@ def test_pad(device, dt, pad, config): if pad_tensor_8b.dtype != torch.uint8: pad_tensor_8b = pad_tensor_8b.to(torch.uint8) - _assert_equal_tensor_to_pil(pad_tensor_8b, pad_pil_img, msg="{}, {}".format(pad, config)) + _assert_equal_tensor_to_pil(pad_tensor_8b, pad_pil_img, msg=f"{pad}, {config}") if isinstance(pad, int): script_pad = [ @@ -945,7 +945,7 @@ def test_pad(device, dt, pad, config): else: script_pad = pad pad_tensor_script = script_fn(tensor, script_pad, **config) - assert_equal(pad_tensor, pad_tensor_script, msg="{}, {}".format(pad, config)) + assert_equal(pad_tensor, pad_tensor_script, msg=f"{pad}, {config}") _test_fn_on_batch(batch_tensors, F.pad, padding=script_pad, **config) @@ -958,7 +958,7 @@ def test_resized_crop(device, mode): tensor, _ = _create_data(26, 36, device=device) out_tensor = F.resized_crop(tensor, top=0, left=0, height=26, width=36, size=[26, 36], interpolation=mode) - assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])) + assert_equal(tensor, out_tensor, msg=f"{out_tensor[0, :5, :5]} vs {tensor[0, :5, :5]}") # 2) resize by half and crop a TL corner tensor, _ = _create_data(26, 36, device=device) @@ -967,7 +967,7 @@ def test_resized_crop(device, mode): assert_equal( expected_out_tensor, out_tensor, - msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10]), + msg=f"{expected_out_tensor[0, :10, :10]} vs {out_tensor[0, :10, :10]}", ) batch_tensors = _create_data_batch(26, 36, num_samples=4, device=device) @@ -1126,7 +1126,7 @@ def test_gaussian_blur(device, image_size, dt, ksize, sigma, fn): _ksize = (ksize, ksize) if isinstance(ksize, int) else ksize _sigma = sigma[0] if sigma is not None else None shape = tensor.shape - gt_key = "{}_{}_{}__{}_{}_{}".format(shape[-2], shape[-1], shape[-3], _ksize[0], _ksize[1], _sigma) + gt_key = f"{shape[-2]}_{shape[-1]}_{shape[-3]}__{_ksize[0]}_{_ksize[1]}_{_sigma}" if gt_key not in true_cv2_results: return @@ -1135,7 +1135,7 @@ def test_gaussian_blur(device, image_size, dt, ksize, sigma, fn): ) out = fn(tensor, kernel_size=ksize, sigma=sigma) - torch.testing.assert_close(out, true_out, rtol=0.0, atol=1.0, msg="{}, {}".format(ksize, sigma)) + torch.testing.assert_close(out, true_out, rtol=0.0, atol=1.0, msg=f"{ksize}, {sigma}") @pytest.mark.parametrize("device", cpu_and_gpu()) diff --git a/test/test_image.py b/test/test_image.py index 9c6a73b8362..c4ee7f2a4ae 100644 --- a/test/test_image.py +++ b/test/test_image.py @@ -219,7 +219,7 @@ def test_write_png(img_path, tmpdir): img_pil = img_pil.permute(2, 0, 1) filename, _ = os.path.splitext(os.path.basename(img_path)) - torch_png = os.path.join(tmpdir, "{0}_torch.png".format(filename)) + torch_png = os.path.join(tmpdir, f"{filename}_torch.png") write_png(img_pil, torch_png, compression_level=6) saved_image = torch.from_numpy(np.array(Image.open(torch_png))) saved_image = saved_image.permute(2, 0, 
1) @@ -426,7 +426,7 @@ def test_encode_jpeg_reference(img_path): dirname = os.path.dirname(img_path) filename, _ = os.path.splitext(os.path.basename(img_path)) write_folder = os.path.join(dirname, "jpeg_write") - expected_file = os.path.join(write_folder, "{0}_pil.jpg".format(filename)) + expected_file = os.path.join(write_folder, f"{filename}_pil.jpg") img = decode_jpeg(read_file(img_path)) with open(expected_file, "rb") as f: @@ -450,8 +450,8 @@ def test_write_jpeg_reference(img_path, tmpdir): basedir = os.path.dirname(img_path) filename, _ = os.path.splitext(os.path.basename(img_path)) - torch_jpeg = os.path.join(tmpdir, "{0}_torch.jpg".format(filename)) - pil_jpeg = os.path.join(basedir, "jpeg_write", "{0}_pil.jpg".format(filename)) + torch_jpeg = os.path.join(tmpdir, f"{filename}_torch.jpg") + pil_jpeg = os.path.join(basedir, "jpeg_write", f"{filename}_pil.jpg") write_jpeg(img, torch_jpeg, quality=75) diff --git a/test/test_models.py b/test/test_models.py index 659198f42f4..a2dc1a72b8a 100644 --- a/test/test_models.py +++ b/test/test_models.py @@ -75,12 +75,12 @@ def _assert_expected(output, name, prec): if ACCEPT: filename = {os.path.basename(expected_file)} - print("Accepting updated output for {}:\n\n{}".format(filename, output)) + print(f"Accepting updated output for {filename}:\n\n{output}") torch.save(output, expected_file) MAX_PICKLE_SIZE = 50 * 1000 # 50 KB binary_size = os.path.getsize(expected_file) if binary_size > MAX_PICKLE_SIZE: - raise RuntimeError("The output for {}, is larger than 50kb".format(filename)) + raise RuntimeError(f"The output for {filename}, is larger than 50kb") else: expected = torch.load(expected_file) rtol = atol = prec @@ -284,11 +284,11 @@ def test_memory_efficient_densenet(model_name): model1 = models.__dict__[model_name](num_classes=50, memory_efficient=True) params = model1.state_dict() - num_params = sum([x.numel() for x in model1.parameters()]) + num_params = sum(x.numel() for x in model1.parameters()) model1.eval() out1 = model1(x) out1.sum().backward() - num_grad = sum([x.grad.numel() for x in model1.parameters() if x.grad is not None]) + num_grad = sum(x.grad.numel() for x in model1.parameters() if x.grad is not None) model2 = models.__dict__[model_name](num_classes=50, memory_efficient=False) model2.load_state_dict(params) @@ -435,8 +435,8 @@ def test_generalizedrcnn_transform_repr(): # Check integrity of object __repr__ attribute expected_string = "GeneralizedRCNNTransform(" _indent = "\n " - expected_string += "{0}Normalize(mean={1}, std={2})".format(_indent, image_mean, image_std) - expected_string += "{0}Resize(min_size=({1},), max_size={2}, ".format(_indent, min_size, max_size) + expected_string += f"{_indent}Normalize(mean={image_mean}, std={image_std})" + expected_string += f"{_indent}Resize(min_size=({min_size},), max_size={max_size}, " expected_string += "mode='bilinear')\n)" assert t.__repr__() == expected_string diff --git a/test/test_onnx.py b/test/test_onnx.py index c81d490a882..830699ab5ee 100644 --- a/test/test_onnx.py +++ b/test/test_onnx.py @@ -78,7 +78,7 @@ def to_numpy(tensor): ort_session = onnxruntime.InferenceSession(onnx_io.getvalue()) # compute onnxruntime output prediction - ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs)) + ort_inputs = {ort_session.get_inputs()[i].name: inpt for i, inpt in enumerate(inputs)} ort_outs = ort_session.run(None, ort_inputs) for i in range(0, len(outputs)): @@ -185,7 +185,7 @@ def test_roi_pool(self): def test_resize_images(self): class 
TransformModule(torch.nn.Module): def __init__(self_module): - super(TransformModule, self_module).__init__() + super().__init__() self_module.transform = self._init_test_generalized_rcnn_transform() def forward(self_module, images): @@ -200,7 +200,7 @@ def forward(self_module, images): def test_transform_images(self): class TransformModule(torch.nn.Module): def __init__(self_module): - super(TransformModule, self_module).__init__() + super().__init__() self_module.transform = self._init_test_generalized_rcnn_transform() def forward(self_module, images): @@ -301,7 +301,7 @@ def test_rpn(self): class RPNModule(torch.nn.Module): def __init__(self_module): - super(RPNModule, self_module).__init__() + super().__init__() self_module.rpn = self._init_test_rpn() def forward(self_module, images, features): @@ -335,7 +335,7 @@ def forward(self_module, images, features): def test_multi_scale_roi_align(self): class TransformModule(torch.nn.Module): def __init__(self): - super(TransformModule, self).__init__() + super().__init__() self.model = ops.MultiScaleRoIAlign(["feat1", "feat2"], 3, 2) self.image_sizes = [(512, 512)] @@ -371,7 +371,7 @@ def forward(self, input, boxes): def test_roi_heads(self): class RoiHeadsModule(torch.nn.Module): def __init__(self_module): - super(RoiHeadsModule, self_module).__init__() + super().__init__() self_module.transform = self._init_test_generalized_rcnn_transform() self_module.rpn = self._init_test_rpn() self_module.roi_heads = self._init_test_roi_heads_faster_rcnn() diff --git a/test/test_ops.py b/test/test_ops.py index 64329936b72..2d2673acd88 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -709,7 +709,7 @@ def test_forward(self, device, contiguous, batch_sz, dtype=None): expected = self.expected_fn(x, weight, offset, mask, bias, stride=stride, padding=padding, dilation=dilation) torch.testing.assert_close( - res.to(expected), expected, rtol=tol, atol=tol, msg="\nres:\n{}\nexpected:\n{}".format(res, expected) + res.to(expected), expected, rtol=tol, atol=tol, msg=f"\nres:\n{res}\nexpected:\n{expected}" ) # no modulation test @@ -717,7 +717,7 @@ def test_forward(self, device, contiguous, batch_sz, dtype=None): expected = self.expected_fn(x, weight, offset, None, bias, stride=stride, padding=padding, dilation=dilation) torch.testing.assert_close( - res.to(expected), expected, rtol=tol, atol=tol, msg="\nres:\n{}\nexpected:\n{}".format(res, expected) + res.to(expected), expected, rtol=tol, atol=tol, msg=f"\nres:\n{res}\nexpected:\n{expected}" ) def test_wrong_sizes(self): diff --git a/test/test_transforms.py b/test/test_transforms.py index 3712e592cc4..b4bfe963c0e 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -114,7 +114,7 @@ def test_dtype_int_to_int(self, input_dtype, output_dtype): output_image, rtol=0.0, atol=1e-6, - msg="{} vs {}".format(output_image_script, output_image), + msg=f"{output_image_script} vs {output_image}", ) actual_min, actual_max = output_image.tolist() @@ -368,7 +368,7 @@ def test_resize(height, width, osize, max_size): t = transforms.Resize(osize, max_size=max_size) result = t(img) - msg = "{}, {} - {} - {}".format(height, width, osize, max_size) + msg = f"{height}, {width} - {osize} - {max_size}" osize = osize[0] if isinstance(osize, (list, tuple)) else osize # If size is an int, smaller edge of the image will be matched to this number. # i.e, if height > width, then image will be rescaled to (size * height / width, size). 
@@ -469,11 +469,11 @@ def test_pad_with_tuple_of_pad_values(self): width = random.randint(10, 32) * 2 img = transforms.ToPILImage()(torch.ones(3, height, width)) - padding = tuple([random.randint(1, 20) for _ in range(2)]) + padding = tuple(random.randint(1, 20) for _ in range(2)) output = transforms.Pad(padding)(img) assert output.size == (width + padding[0] * 2, height + padding[1] * 2) - padding = tuple([random.randint(1, 20) for _ in range(4)]) + padding = tuple(random.randint(1, 20) for _ in range(4)) output = transforms.Pad(padding)(img) assert output.size[0] == width + padding[0] + padding[2] assert output.size[1] == height + padding[1] + padding[3] @@ -1823,7 +1823,7 @@ def test_center_crop_2(odd_image_size, delta, delta_width, delta_height): assert_equal( output_tensor, output_pil, - msg="image_size: {} crop_size: {}".format(input_image_size, crop_size), + msg=f"image_size: {input_image_size} crop_size: {crop_size}", ) # Check if content in center of both image and cropped output is same. @@ -2210,9 +2210,9 @@ def _test_transformation(self, angle, translate, scale, shear, pil_image, input_ np_result = np.array(result) n_diff_pixels = np.sum(np_result != true_result) / 3 # Accept 3 wrong pixels - error_msg = "angle={}, translate={}, scale={}, shear={}\n".format( - angle, translate, scale, shear - ) + "n diff pixels={}\n".format(n_diff_pixels) + error_msg = ( + f"angle={angle}, translate={translate}, scale={scale}, shear={shear}\n" + f"n diff pixels={n_diff_pixels}\n" + ) assert n_diff_pixels < 3, error_msg def test_transformation_discrete(self, pil_image, input_img): diff --git a/test/test_transforms_tensor.py b/test/test_transforms_tensor.py index 5ee1c738a77..9158a8cc450 100644 --- a/test/test_transforms_tensor.py +++ b/test/test_transforms_tensor.py @@ -366,7 +366,7 @@ def test_x_crop_save(method, tmpdir): ] ) scripted_fn = torch.jit.script(fn) - scripted_fn.save(os.path.join(tmpdir, "t_op_list_{}.pt".format(method))) + scripted_fn.save(os.path.join(tmpdir, f"t_op_list_{method}.pt")) class TestResize: @@ -811,7 +811,7 @@ def test_compose(device): transformed_tensor = transforms(tensor) torch.manual_seed(12) transformed_tensor_script = scripted_fn(tensor) - assert_equal(transformed_tensor, transformed_tensor_script, msg="{}".format(transforms)) + assert_equal(transformed_tensor, transformed_tensor_script, msg=f"{transforms}") t = T.Compose( [ @@ -849,7 +849,7 @@ def test_random_apply(device): transformed_tensor = transforms(tensor) torch.manual_seed(12) transformed_tensor_script = scripted_fn(tensor) - assert_equal(transformed_tensor, transformed_tensor_script, msg="{}".format(transforms)) + assert_equal(transformed_tensor, transformed_tensor_script, msg=f"{transforms}") if device == "cpu": # Can't check this twice, otherwise diff --git a/test/test_transforms_video.py b/test/test_transforms_video.py index a3bd8528abf..4e5b6b5492d 100644 --- a/test/test_transforms_video.py +++ b/test/test_transforms_video.py @@ -165,7 +165,7 @@ def test_random_horizontal_flip_video(self): random_state = random.getstate() random.seed(42) clip = torch.rand((3, 4, 112, 112), dtype=torch.float) - hclip = clip.flip((-1)) + hclip = clip.flip(-1) num_samples = 250 num_horizontal = 0 diff --git a/torchvision/__init__.py b/torchvision/__init__.py index 03dc20c5c54..0940cf98b4a 100644 --- a/torchvision/__init__.py +++ b/torchvision/__init__.py @@ -43,7 +43,7 @@ def set_image_backend(backend): """ global _image_backend if backend not in ["PIL", "accimage"]: - raise ValueError("Invalid backend '{}'. 
Options are 'PIL' and 'accimage'".format(backend)) + raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'") _image_backend = backend diff --git a/torchvision/datasets/caltech.py b/torchvision/datasets/caltech.py index f3596ed0dc0..38f086fd04d 100644 --- a/torchvision/datasets/caltech.py +++ b/torchvision/datasets/caltech.py @@ -40,9 +40,7 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ) -> None: - super(Caltech101, self).__init__( - os.path.join(root, "caltech101"), transform=transform, target_transform=target_transform - ) + super().__init__(os.path.join(root, "caltech101"), transform=transform, target_transform=target_transform) os.makedirs(self.root, exist_ok=True) if not isinstance(target_type, list): target_type = [target_type] @@ -90,7 +88,7 @@ def __getitem__(self, index: int) -> Tuple[Any, Any]: self.root, "101_ObjectCategories", self.categories[self.y[index]], - "image_{:04d}.jpg".format(self.index[index]), + f"image_{self.index[index]:04d}.jpg", ) ) @@ -104,7 +102,7 @@ def __getitem__(self, index: int) -> Tuple[Any, Any]: self.root, "Annotations", self.annotation_categories[self.y[index]], - "annotation_{:04d}.mat".format(self.index[index]), + f"annotation_{self.index[index]:04d}.mat", ) ) target.append(data["obj_contour"]) @@ -167,9 +165,7 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ) -> None: - super(Caltech256, self).__init__( - os.path.join(root, "caltech256"), transform=transform, target_transform=target_transform - ) + super().__init__(os.path.join(root, "caltech256"), transform=transform, target_transform=target_transform) os.makedirs(self.root, exist_ok=True) if download: @@ -205,7 +201,7 @@ def __getitem__(self, index: int) -> Tuple[Any, Any]: self.root, "256_ObjectCategories", self.categories[self.y[index]], - "{:03d}_{:04d}.jpg".format(self.y[index] + 1, self.index[index]), + f"{self.y[index] + 1:03d}_{self.index[index]:04d}.jpg", ) ) diff --git a/torchvision/datasets/celeba.py b/torchvision/datasets/celeba.py index 2c954c4d719..327e862ea5e 100644 --- a/torchvision/datasets/celeba.py +++ b/torchvision/datasets/celeba.py @@ -66,7 +66,7 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ) -> None: - super(CelebA, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) self.split = split if isinstance(target_type, list): self.target_type = target_type @@ -166,7 +166,7 @@ def __getitem__(self, index: int) -> Tuple[Any, Any]: target.append(self.landmarks_align[index, :]) else: # TODO: refactor with utils.verify_str_arg - raise ValueError('Target type "{}" is not recognized.'.format(t)) + raise ValueError(f'Target type "{t}" is not recognized.') if self.transform is not None: X = self.transform(X) diff --git a/torchvision/datasets/cifar.py b/torchvision/datasets/cifar.py index 1d124b32194..deabd445d22 100644 --- a/torchvision/datasets/cifar.py +++ b/torchvision/datasets/cifar.py @@ -58,7 +58,7 @@ def __init__( download: bool = False, ) -> None: - super(CIFAR10, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) self.train = train # training set or test set diff --git a/torchvision/datasets/cityscapes.py b/torchvision/datasets/cityscapes.py index cfc3e8bab71..d2c120a8fe8 100644 --- a/torchvision/datasets/cityscapes.py +++ 
b/torchvision/datasets/cityscapes.py @@ -111,7 +111,7 @@ def __init__( target_transform: Optional[Callable] = None, transforms: Optional[Callable] = None, ) -> None: - super(Cityscapes, self).__init__(root, transforms, transform, target_transform) + super().__init__(root, transforms, transform, target_transform) self.mode = "gtFine" if mode == "fine" else "gtCoarse" self.images_dir = os.path.join(self.root, "leftImg8bit", split) self.targets_dir = os.path.join(self.root, self.mode, split) @@ -206,16 +206,16 @@ def extra_repr(self) -> str: return "\n".join(lines).format(**self.__dict__) def _load_json(self, path: str) -> Dict[str, Any]: - with open(path, "r") as file: + with open(path) as file: data = json.load(file) return data def _get_target_suffix(self, mode: str, target_type: str) -> str: if target_type == "instance": - return "{}_instanceIds.png".format(mode) + return f"{mode}_instanceIds.png" elif target_type == "semantic": - return "{}_labelIds.png".format(mode) + return f"{mode}_labelIds.png" elif target_type == "color": - return "{}_color.png".format(mode) + return f"{mode}_color.png" else: - return "{}_polygons.json".format(mode) + return f"{mode}_polygons.json" diff --git a/torchvision/datasets/fakedata.py b/torchvision/datasets/fakedata.py index 2c95cf488c1..244af634989 100644 --- a/torchvision/datasets/fakedata.py +++ b/torchvision/datasets/fakedata.py @@ -31,9 +31,7 @@ def __init__( target_transform: Optional[Callable] = None, random_offset: int = 0, ) -> None: - super(FakeData, self).__init__( - None, transform=transform, target_transform=target_transform # type: ignore[arg-type] - ) + super().__init__(None, transform=transform, target_transform=target_transform) # type: ignore[arg-type] self.size = size self.num_classes = num_classes self.image_size = image_size @@ -49,7 +47,7 @@ def __getitem__(self, index: int) -> Tuple[Any, Any]: """ # create random image that is consistent with the index id if index >= len(self): - raise IndexError("{} index out of range".format(self.__class__.__name__)) + raise IndexError(f"{self.__class__.__name__} index out of range") rng_state = torch.get_rng_state() torch.manual_seed(index + self.random_offset) img = torch.randn(*self.image_size) diff --git a/torchvision/datasets/flickr.py b/torchvision/datasets/flickr.py index 31cb68d4937..1b4c1e9fc31 100644 --- a/torchvision/datasets/flickr.py +++ b/torchvision/datasets/flickr.py @@ -13,7 +13,7 @@ class Flickr8kParser(HTMLParser): """Parser for extracting captions from the Flickr8k dataset web page.""" def __init__(self, root: str) -> None: - super(Flickr8kParser, self).__init__() + super().__init__() self.root = root @@ -71,7 +71,7 @@ def __init__( transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, ) -> None: - super(Flickr8k, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) self.ann_file = os.path.expanduser(ann_file) # Read annotations and store in a dict @@ -127,7 +127,7 @@ def __init__( transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, ) -> None: - super(Flickr30k, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) self.ann_file = os.path.expanduser(ann_file) # Read annotations and store in a dict diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py index fedf4a35539..0bdb3ca0d8b 100644 --- 
a/torchvision/datasets/folder.py +++ b/torchvision/datasets/folder.py @@ -140,7 +140,7 @@ def __init__( target_transform: Optional[Callable] = None, is_valid_file: Optional[Callable[[str], bool]] = None, ) -> None: - super(DatasetFolder, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) classes, class_to_idx = self.find_classes(self.root) samples = self.make_dataset(self.root, class_to_idx, extensions, is_valid_file) @@ -254,7 +254,7 @@ def accimage_loader(path: str) -> Any: try: return accimage.Image(path) - except IOError: + except OSError: # Potentially a decoding problem, fall back to PIL.Image return pil_loader(path) @@ -306,7 +306,7 @@ def __init__( loader: Callable[[str], Any] = default_loader, is_valid_file: Optional[Callable[[str], bool]] = None, ): - super(ImageFolder, self).__init__( + super().__init__( root, loader, IMG_EXTENSIONS if is_valid_file is None else None, diff --git a/torchvision/datasets/hmdb51.py b/torchvision/datasets/hmdb51.py index fe12c0d0b47..19c00866191 100644 --- a/torchvision/datasets/hmdb51.py +++ b/torchvision/datasets/hmdb51.py @@ -72,9 +72,9 @@ def __init__( _video_min_dimension: int = 0, _audio_samples: int = 0, ) -> None: - super(HMDB51, self).__init__(root) + super().__init__(root) if fold not in (1, 2, 3): - raise ValueError("fold should be between 1 and 3, got {}".format(fold)) + raise ValueError(f"fold should be between 1 and 3, got {fold}") extensions = ("avi",) self.classes, class_to_idx = find_classes(self.root) @@ -113,7 +113,7 @@ def metadata(self) -> Dict[str, Any]: def _select_fold(self, video_list: List[str], annotations_dir: str, fold: int, train: bool) -> List[int]: target_tag = self.TRAIN_TAG if train else self.TEST_TAG - split_pattern_name = "*test_split{}.txt".format(fold) + split_pattern_name = f"*test_split{fold}.txt" split_pattern_path = os.path.join(annotations_dir, split_pattern_name) annotation_paths = glob.glob(split_pattern_path) selected_files = set() diff --git a/torchvision/datasets/imagenet.py b/torchvision/datasets/imagenet.py index 0fdb3395a5e..6cd8ee619a3 100644 --- a/torchvision/datasets/imagenet.py +++ b/torchvision/datasets/imagenet.py @@ -58,7 +58,7 @@ def __init__(self, root: str, split: str = "train", download: Optional[str] = No self.parse_archives() wnid_to_classes = load_meta_file(self.root)[0] - super(ImageNet, self).__init__(self.split_folder, **kwargs) + super().__init__(self.split_folder, **kwargs) self.root = root self.wnids = self.classes @@ -132,7 +132,7 @@ def parse_meta_mat(devkit_root: str) -> Tuple[Dict[int, str], Dict[str, Tuple[st def parse_val_groundtruth_txt(devkit_root: str) -> List[int]: file = os.path.join(devkit_root, "data", "ILSVRC2012_validation_ground_truth.txt") - with open(file, "r") as txtfh: + with open(file) as txtfh: val_idcs = txtfh.readlines() return [int(val_idx) for val_idx in val_idcs] @@ -215,7 +215,7 @@ def parse_val_archive( val_root = os.path.join(root, folder) extract_archive(os.path.join(root, file), val_root) - images = sorted([os.path.join(val_root, image) for image in os.listdir(val_root)]) + images = sorted(os.path.join(val_root, image) for image in os.listdir(val_root)) for wnid in set(wnids): os.mkdir(os.path.join(val_root, wnid)) diff --git a/torchvision/datasets/inaturalist.py b/torchvision/datasets/inaturalist.py index 1e2d09d39f8..2191c3f487e 100644 --- a/torchvision/datasets/inaturalist.py +++ b/torchvision/datasets/inaturalist.py @@ -74,9 +74,7 @@ def 
__init__( ) -> None: self.version = verify_str_arg(version, "version", DATASET_URLS.keys()) - super(INaturalist, self).__init__( - os.path.join(root, version), transform=transform, target_transform=target_transform - ) + super().__init__(os.path.join(root, version), transform=transform, target_transform=target_transform) os.makedirs(root, exist_ok=True) if download: diff --git a/torchvision/datasets/kinetics.py b/torchvision/datasets/kinetics.py index 058fcca29e6..64111e4f475 100644 --- a/torchvision/datasets/kinetics.py +++ b/torchvision/datasets/kinetics.py @@ -175,7 +175,7 @@ def _download_videos(self) -> None: split_url_filepath = path.join(file_list_path, path.basename(split_url)) if not check_integrity(split_url_filepath): download_url(split_url, file_list_path) - list_video_urls = open(split_url_filepath, "r") + list_video_urls = open(split_url_filepath) if self.num_download_workers == 1: for line in list_video_urls.readlines(): @@ -309,7 +309,7 @@ def __init__( "Kinetics400. Please use Kinetics instead." ) - super(Kinetics400, self).__init__( + super().__init__( root=root, frames_per_clip=frames_per_clip, _legacy=True, diff --git a/torchvision/datasets/lfw.py b/torchvision/datasets/lfw.py index 77a2b41ba35..6720c29ae9d 100644 --- a/torchvision/datasets/lfw.py +++ b/torchvision/datasets/lfw.py @@ -39,9 +39,7 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ): - super(_LFW, self).__init__( - os.path.join(root, self.base_folder), transform=transform, target_transform=target_transform - ) + super().__init__(os.path.join(root, self.base_folder), transform=transform, target_transform=target_transform) self.image_set = verify_str_arg(image_set.lower(), "image_set", self.file_dict.keys()) images_dir, self.filename, self.md5 = self.file_dict[self.image_set] @@ -122,14 +120,14 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ): - super(LFWPeople, self).__init__(root, split, image_set, "people", transform, target_transform, download) + super().__init__(root, split, image_set, "people", transform, target_transform, download) self.class_to_idx = self._get_classes() self.data, self.targets = self._get_people() def _get_people(self): data, targets = [], [] - with open(os.path.join(self.root, self.labels_file), "r") as f: + with open(os.path.join(self.root, self.labels_file)) as f: lines = f.readlines() n_folds, s = (int(lines[0]), 1) if self.split == "10fold" else (1, 0) @@ -146,7 +144,7 @@ def _get_people(self): return data, targets def _get_classes(self): - with open(os.path.join(self.root, self.names), "r") as f: + with open(os.path.join(self.root, self.names)) as f: lines = f.readlines() names = [line.strip().split()[0] for line in lines] class_to_idx = {name: i for i, name in enumerate(names)} @@ -172,7 +170,7 @@ def __getitem__(self, index: int) -> Tuple[Any, Any]: return img, target def extra_repr(self) -> str: - return super().extra_repr() + "\nClasses (identities): {}".format(len(self.class_to_idx)) + return super().extra_repr() + f"\nClasses (identities): {len(self.class_to_idx)}" class LFWPairs(_LFW): @@ -204,13 +202,13 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ): - super(LFWPairs, self).__init__(root, split, image_set, "pairs", transform, target_transform, download) + super().__init__(root, split, image_set, "pairs", transform, target_transform, download) self.pair_names, self.data, self.targets = self._get_pairs(self.images_dir) def _get_pairs(self, images_dir): 
pair_names, data, targets = [], [], [] - with open(os.path.join(self.root, self.labels_file), "r") as f: + with open(os.path.join(self.root, self.labels_file)) as f: lines = f.readlines() if self.split == "10fold": n_folds, n_pairs = lines[0].split("\t") diff --git a/torchvision/datasets/lsun.py b/torchvision/datasets/lsun.py index 5d4bcf948d7..3079e530371 100644 --- a/torchvision/datasets/lsun.py +++ b/torchvision/datasets/lsun.py @@ -18,7 +18,7 @@ def __init__( ) -> None: import lmdb - super(LSUNClass, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) self.env = lmdb.open(root, max_readers=1, readonly=True, lock=False, readahead=False, meminit=False) with self.env.begin(write=False) as txn: @@ -77,7 +77,7 @@ def __init__( transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, ) -> None: - super(LSUN, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) self.classes = self._verify_classes(classes) # for each class, create an LSUNClassDataset diff --git a/torchvision/datasets/mnist.py b/torchvision/datasets/mnist.py index 5445730fb23..5aac1cebca8 100644 --- a/torchvision/datasets/mnist.py +++ b/torchvision/datasets/mnist.py @@ -87,7 +87,7 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ) -> None: - super(MNIST, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) self.train = train # training set or test set if self._check_legacy_exist(): @@ -180,18 +180,18 @@ def download(self) -> None: # download files for filename, md5 in self.resources: for mirror in self.mirrors: - url = "{}{}".format(mirror, filename) + url = f"{mirror}{filename}" try: - print("Downloading {}".format(url)) + print(f"Downloading {url}") download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5) except URLError as error: - print("Failed to download (trying next):\n{}".format(error)) + print(f"Failed to download (trying next):\n{error}") continue finally: print() break else: - raise RuntimeError("Error downloading {}".format(filename)) + raise RuntimeError(f"Error downloading {filename}") def extra_repr(self) -> str: return "Split: {}".format("Train" if self.train is True else "Test") @@ -292,16 +292,16 @@ def __init__(self, root: str, split: str, **kwargs: Any) -> None: self.split = verify_str_arg(split, "split", self.splits) self.training_file = self._training_file(split) self.test_file = self._test_file(split) - super(EMNIST, self).__init__(root, **kwargs) + super().__init__(root, **kwargs) self.classes = self.classes_split_dict[self.split] @staticmethod def _training_file(split) -> str: - return "training_{}.pt".format(split) + return f"training_{split}.pt" @staticmethod def _test_file(split) -> str: - return "test_{}.pt".format(split) + return f"test_{split}.pt" @property def _file_prefix(self) -> str: @@ -423,7 +423,7 @@ def __init__( self.data_file = what + ".pt" self.training_file = self.data_file self.test_file = self.data_file - super(QMNIST, self).__init__(root, train, **kwargs) + super().__init__(root, train, **kwargs) @property def images_file(self) -> str: @@ -481,7 +481,7 @@ def __getitem__(self, index: int) -> Tuple[Any, Any]: return img, target def extra_repr(self) -> str: - return "Split: 
{}".format(self.what) + return f"Split: {self.what}" def get_int(b: bytes) -> int: diff --git a/torchvision/datasets/omniglot.py b/torchvision/datasets/omniglot.py index 0a6577beaae..1607b7b3060 100644 --- a/torchvision/datasets/omniglot.py +++ b/torchvision/datasets/omniglot.py @@ -39,7 +39,7 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ) -> None: - super(Omniglot, self).__init__(join(root, self.folder), transform=transform, target_transform=target_transform) + super().__init__(join(root, self.folder), transform=transform, target_transform=target_transform) self.background = background if download: @@ -51,7 +51,7 @@ def __init__( self.target_folder = join(self.root, self._get_target_folder()) self._alphabets = list_dir(self.target_folder) self._characters: List[str] = sum( - [[join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets], [] + ([join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets), [] ) self._character_images = [ [(image, idx) for image in list_files(join(self.target_folder, character), ".png")] diff --git a/torchvision/datasets/phototour.py b/torchvision/datasets/phototour.py index fe8134fe2c8..da1b0a9716d 100644 --- a/torchvision/datasets/phototour.py +++ b/torchvision/datasets/phototour.py @@ -89,11 +89,11 @@ class PhotoTour(VisionDataset): def __init__( self, root: str, name: str, train: bool = True, transform: Optional[Callable] = None, download: bool = False ) -> None: - super(PhotoTour, self).__init__(root, transform=transform) + super().__init__(root, transform=transform) self.name = name self.data_dir = os.path.join(self.root, name) - self.data_down = os.path.join(self.root, "{}.zip".format(name)) - self.data_file = os.path.join(self.root, "{}.pt".format(name)) + self.data_down = os.path.join(self.root, f"{name}.zip") + self.data_file = os.path.join(self.root, f"{name}.pt") self.train = train self.mean = self.means[name] @@ -139,7 +139,7 @@ def _check_downloaded(self) -> bool: def download(self) -> None: if self._check_datafile_exists(): - print("# Found cached data {}".format(self.data_file)) + print(f"# Found cached data {self.data_file}") return if not self._check_downloaded(): @@ -151,7 +151,7 @@ def download(self) -> None: download_url(url, self.root, filename, md5) - print("# Extracting data {}\n".format(self.data_down)) + print(f"# Extracting data {self.data_down}\n") import zipfile @@ -162,7 +162,7 @@ def download(self) -> None: def cache(self) -> None: # process and save as torch files - print("# Caching data {}".format(self.data_file)) + print(f"# Caching data {self.data_file}") dataset = ( read_image_file(self.data_dir, self.image_ext, self.lens[self.name]), @@ -209,7 +209,7 @@ def read_info_file(data_dir: str, info_file: str) -> torch.Tensor: """Return a Tensor containing the list of labels Read the file and keep only the ID of the 3D point. """ - with open(os.path.join(data_dir, info_file), "r") as f: + with open(os.path.join(data_dir, info_file)) as f: labels = [int(line.split()[0]) for line in f] return torch.LongTensor(labels) @@ -220,7 +220,7 @@ def read_matches_files(data_dir: str, matches_file: str) -> torch.Tensor: Matches are represented with a 1, non matches with a 0. 
""" matches = [] - with open(os.path.join(data_dir, matches_file), "r") as f: + with open(os.path.join(data_dir, matches_file)) as f: for line in f: line_split = line.split() matches.append([int(line_split[0]), int(line_split[3]), int(line_split[1] == line_split[4])]) diff --git a/torchvision/datasets/places365.py b/torchvision/datasets/places365.py index 648e0d604ba..dd11d7331ae 100644 --- a/torchvision/datasets/places365.py +++ b/torchvision/datasets/places365.py @@ -117,7 +117,7 @@ def process(line: str) -> Tuple[str, int]: if not self._check_integrity(file, md5, download): self.download_devkit() - with open(file, "r") as fh: + with open(file) as fh: class_to_idx = dict(process(line) for line in fh) return sorted(class_to_idx.keys()), class_to_idx @@ -132,7 +132,7 @@ def process(line: str, sep="/") -> Tuple[str, int]: if not self._check_integrity(file, md5, download): self.download_devkit() - with open(file, "r") as fh: + with open(file) as fh: images = [process(line) for line in fh] _, targets = zip(*images) diff --git a/torchvision/datasets/sbd.py b/torchvision/datasets/sbd.py index 889dfc3a0be..d1f2f3016a2 100644 --- a/torchvision/datasets/sbd.py +++ b/torchvision/datasets/sbd.py @@ -65,7 +65,7 @@ def __init__( except ImportError: raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: " "pip install scipy") - super(SBDataset, self).__init__(root, transforms) + super().__init__(root, transforms) self.image_set = verify_str_arg(image_set, "image_set", ("train", "val", "train_noval")) self.mode = verify_str_arg(mode, "mode", ("segmentation", "boundaries")) self.num_classes = 20 @@ -87,7 +87,7 @@ def __init__( split_f = os.path.join(sbd_root, image_set.rstrip("\n") + ".txt") - with open(os.path.join(split_f), "r") as fh: + with open(os.path.join(split_f)) as fh: file_names = [x.strip() for x in fh.readlines()] self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names] diff --git a/torchvision/datasets/sbu.py b/torchvision/datasets/sbu.py index 53c6218a7de..9e4cb7a94eb 100644 --- a/torchvision/datasets/sbu.py +++ b/torchvision/datasets/sbu.py @@ -33,7 +33,7 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = True, ) -> None: - super(SBU, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) if download: self.download() diff --git a/torchvision/datasets/semeion.py b/torchvision/datasets/semeion.py index b97918a6292..b4407b779d2 100644 --- a/torchvision/datasets/semeion.py +++ b/torchvision/datasets/semeion.py @@ -35,7 +35,7 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = True, ) -> None: - super(SEMEION, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) if download: self.download() diff --git a/torchvision/datasets/stl10.py b/torchvision/datasets/stl10.py index 20ebbc3b0ee..354e583c3d2 100644 --- a/torchvision/datasets/stl10.py +++ b/torchvision/datasets/stl10.py @@ -53,7 +53,7 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ) -> None: - super(STL10, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) self.split = verify_str_arg(split, "split", self.splits) self.folds = self._verify_folds(folds) @@ -167,7 +167,7 @@ def __load_folds(self, 
folds: Optional[int]) -> None: if folds is None: return path_to_folds = os.path.join(self.root, self.base_folder, self.folds_list_file) - with open(path_to_folds, "r") as f: + with open(path_to_folds) as f: str_idx = f.read().splitlines()[folds] list_idx = np.fromstring(str_idx, dtype=np.int64, sep=" ") self.data = self.data[list_idx, :, :, :] diff --git a/torchvision/datasets/svhn.py b/torchvision/datasets/svhn.py index f5c6087b778..444bc59ca28 100644 --- a/torchvision/datasets/svhn.py +++ b/torchvision/datasets/svhn.py @@ -60,7 +60,7 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ) -> None: - super(SVHN, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) self.split = verify_str_arg(split, "split", tuple(self.split_list.keys())) self.url = self.split_list[split][0] self.filename = self.split_list[split][1] diff --git a/torchvision/datasets/ucf101.py b/torchvision/datasets/ucf101.py index dbe9b22e603..950bbc076d2 100644 --- a/torchvision/datasets/ucf101.py +++ b/torchvision/datasets/ucf101.py @@ -65,9 +65,9 @@ def __init__( _video_min_dimension: int = 0, _audio_samples: int = 0, ) -> None: - super(UCF101, self).__init__(root) + super().__init__(root) if not 1 <= fold <= 3: - raise ValueError("fold should be between 1 and 3, got {}".format(fold)) + raise ValueError(f"fold should be between 1 and 3, got {fold}") extensions = ("avi",) self.fold = fold @@ -102,10 +102,10 @@ def metadata(self) -> Dict[str, Any]: def _select_fold(self, video_list: List[str], annotation_path: str, fold: int, train: bool) -> List[int]: name = "train" if train else "test" - name = "{}list{:02d}.txt".format(name, fold) + name = f"{name}list{fold:02d}.txt" f = os.path.join(annotation_path, name) selected_files = set() - with open(f, "r") as fid: + with open(f) as fid: data = fid.readlines() data = [x.strip().split(" ")[0] for x in data] data = [os.path.join(self.root, x) for x in data] diff --git a/torchvision/datasets/usps.py b/torchvision/datasets/usps.py index c90ebfa7e6f..4d0dedcb8be 100644 --- a/torchvision/datasets/usps.py +++ b/torchvision/datasets/usps.py @@ -49,7 +49,7 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ) -> None: - super(USPS, self).__init__(root, transform=transform, target_transform=target_transform) + super().__init__(root, transform=transform, target_transform=target_transform) split = "train" if train else "test" url, filename, checksum = self.split_list[split] full_path = os.path.join(self.root, filename) diff --git a/torchvision/datasets/utils.py b/torchvision/datasets/utils.py index feb8d28bcce..a12b04f1ad3 100644 --- a/torchvision/datasets/utils.py +++ b/torchvision/datasets/utils.py @@ -138,7 +138,7 @@ def download_url( try: print("Downloading " + url + " to " + fpath) _urlretrieve(url, fpath) - except (urllib.error.URLError, IOError) as e: # type: ignore[attr-defined] + except (urllib.error.URLError, OSError) as e: # type: ignore[attr-defined] if url[:5] == "https": url = url.replace("https:", "http:") print("Failed download. Trying https -> http instead." 
" Downloading " + url + " to " + fpath) @@ -428,7 +428,7 @@ def download_and_extract_archive( download_url(url, download_root, filename, md5) archive = os.path.join(download_root, filename) - print("Extracting {} to {}".format(archive, extract_root)) + print(f"Extracting {archive} to {extract_root}") extract_archive(archive, extract_root, remove_finished) diff --git a/torchvision/datasets/video_utils.py b/torchvision/datasets/video_utils.py index 8d427c9d80c..1917dedad65 100644 --- a/torchvision/datasets/video_utils.py +++ b/torchvision/datasets/video_utils.py @@ -46,7 +46,7 @@ def unfold(tensor, size, step, dilation=1): return torch.as_strided(tensor, new_size, new_stride) -class _VideoTimestampsDataset(object): +class _VideoTimestampsDataset: """ Dataset used to parallelize the reading of the timestamps of a list of videos, given their paths in the filesystem. @@ -72,7 +72,7 @@ def _collate_fn(x): return x -class VideoClips(object): +class VideoClips: """ Given a list of video files, computes all consecutive subvideos of size `clip_length_in_frames`, where the distance between each subvideo in the @@ -359,7 +359,7 @@ def get_clip(self, idx): resampling_idx = resampling_idx - resampling_idx[0] video = video[resampling_idx] info["video_fps"] = self.frame_rate - assert len(video) == self.num_frames, "{} x {}".format(video.shape, self.num_frames) + assert len(video) == self.num_frames, f"{video.shape} x {self.num_frames}" return video, audio, info, video_idx def __getstate__(self): diff --git a/torchvision/datasets/vision.py b/torchvision/datasets/vision.py index 591c0bb0c0c..296caa0226f 100644 --- a/torchvision/datasets/vision.py +++ b/torchvision/datasets/vision.py @@ -66,9 +66,9 @@ def __len__(self) -> int: def __repr__(self) -> str: head = "Dataset " + self.__class__.__name__ - body = ["Number of datapoints: {}".format(self.__len__())] + body = [f"Number of datapoints: {self.__len__()}"] if self.root is not None: - body.append("Root location: {}".format(self.root)) + body.append(f"Root location: {self.root}") body += self.extra_repr().splitlines() if hasattr(self, "transforms") and self.transforms is not None: body += [repr(self.transforms)] @@ -77,13 +77,13 @@ def __repr__(self) -> str: def _format_transform_repr(self, transform: Callable, head: str) -> List[str]: lines = transform.__repr__().splitlines() - return ["{}{}".format(head, lines[0])] + ["{}{}".format(" " * len(head), line) for line in lines[1:]] + return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]] def extra_repr(self) -> str: return "" -class StandardTransform(object): +class StandardTransform: def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None: self.transform = transform self.target_transform = target_transform @@ -97,7 +97,7 @@ def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]: def _format_transform_repr(self, transform: Callable, head: str) -> List[str]: lines = transform.__repr__().splitlines() - return ["{}{}".format(head, lines[0])] + ["{}{}".format(" " * len(head), line) for line in lines[1:]] + return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]] def __repr__(self) -> str: body = [self.__class__.__name__] diff --git a/torchvision/datasets/voc.py b/torchvision/datasets/voc.py index b2550e3ee65..b8d33e5c97d 100644 --- a/torchvision/datasets/voc.py +++ b/torchvision/datasets/voc.py @@ -114,7 +114,7 @@ def __init__( splits_dir = os.path.join(voc_root, "ImageSets", 
self._SPLITS_DIR) split_f = os.path.join(splits_dir, image_set.rstrip("\n") + ".txt") - with open(os.path.join(split_f), "r") as f: + with open(os.path.join(split_f)) as f: file_names = [x.strip() for x in f.readlines()] image_dir = os.path.join(voc_root, "JPEGImages") diff --git a/torchvision/datasets/widerface.py b/torchvision/datasets/widerface.py index 805a9dbec6f..dd5f7191255 100644 --- a/torchvision/datasets/widerface.py +++ b/torchvision/datasets/widerface.py @@ -62,7 +62,7 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ) -> None: - super(WIDERFace, self).__init__( + super().__init__( root=os.path.join(root, self.BASE_FOLDER), transform=transform, target_transform=target_transform ) # check arguments @@ -115,7 +115,7 @@ def parse_train_val_annotations_file(self) -> None: filename = "wider_face_train_bbx_gt.txt" if self.split == "train" else "wider_face_val_bbx_gt.txt" filepath = os.path.join(self.root, "wider_face_split", filename) - with open(filepath, "r") as f: + with open(filepath) as f: lines = f.readlines() file_name_line, num_boxes_line, box_annotation_line = True, False, False num_boxes, box_counter = 0, 0 @@ -157,12 +157,12 @@ def parse_train_val_annotations_file(self) -> None: box_counter = 0 labels.clear() else: - raise RuntimeError("Error parsing annotation file {}".format(filepath)) + raise RuntimeError(f"Error parsing annotation file {filepath}") def parse_test_annotations_file(self) -> None: filepath = os.path.join(self.root, "wider_face_split", "wider_face_test_filelist.txt") filepath = abspath(expanduser(filepath)) - with open(filepath, "r") as f: + with open(filepath) as f: lines = f.readlines() for line in lines: line = line.rstrip() diff --git a/torchvision/io/_video_opt.py b/torchvision/io/_video_opt.py index d9dbc4a4f32..560491e60c5 100644 --- a/torchvision/io/_video_opt.py +++ b/torchvision/io/_video_opt.py @@ -20,7 +20,7 @@ # simple class for torch scripting # the complex Fraction class from fractions module is not scriptable -class Timebase(object): +class Timebase: __annotations__ = {"numerator": int, "denominator": int} __slots__ = ["numerator", "denominator"] @@ -34,7 +34,7 @@ def __init__( self.denominator = denominator -class VideoMetaData(object): +class VideoMetaData: __annotations__ = { "has_video": bool, "video_timebase": Timebase, @@ -74,7 +74,7 @@ def _validate_pts(pts_range): assert ( pts_range[0] <= pts_range[1] ), """Start pts should not be smaller than end pts, got - start pts: {0:d} and end pts: {1:d}""".format( + start pts: {:d} and end pts: {:d}""".format( pts_range[0], pts_range[1], ) diff --git a/torchvision/models/_utils.py b/torchvision/models/_utils.py index 2a7a1bbaa0f..f4e1cd84508 100644 --- a/torchvision/models/_utils.py +++ b/torchvision/models/_utils.py @@ -54,7 +54,7 @@ def __init__(self, model: nn.Module, return_layers: Dict[str, str]) -> None: if not return_layers: break - super(IntermediateLayerGetter, self).__init__(layers) + super().__init__(layers) self.return_layers = orig_return_layers def forward(self, x): diff --git a/torchvision/models/alexnet.py b/torchvision/models/alexnet.py index 32a8711f64c..0c492dad3a2 100644 --- a/torchvision/models/alexnet.py +++ b/torchvision/models/alexnet.py @@ -16,7 +16,7 @@ class AlexNet(nn.Module): def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None: - super(AlexNet, self).__init__() + super().__init__() self.features = nn.Sequential( nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), nn.ReLU(inplace=True), diff --git 
a/torchvision/models/densenet.py b/torchvision/models/densenet.py index a34b50e41e9..41ec8bb2721 100644 --- a/torchvision/models/densenet.py +++ b/torchvision/models/densenet.py @@ -25,7 +25,7 @@ class _DenseLayer(nn.Module): def __init__( self, num_input_features: int, growth_rate: int, bn_size: int, drop_rate: float, memory_efficient: bool = False ) -> None: - super(_DenseLayer, self).__init__() + super().__init__() self.norm1: nn.BatchNorm2d self.add_module("norm1", nn.BatchNorm2d(num_input_features)) self.relu1: nn.ReLU @@ -106,7 +106,7 @@ def __init__( drop_rate: float, memory_efficient: bool = False, ) -> None: - super(_DenseBlock, self).__init__() + super().__init__() for i in range(num_layers): layer = _DenseLayer( num_input_features + i * growth_rate, @@ -127,7 +127,7 @@ def forward(self, init_features: Tensor) -> Tensor: class _Transition(nn.Sequential): def __init__(self, num_input_features: int, num_output_features: int) -> None: - super(_Transition, self).__init__() + super().__init__() self.add_module("norm", nn.BatchNorm2d(num_input_features)) self.add_module("relu", nn.ReLU(inplace=True)) self.add_module("conv", nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) @@ -161,7 +161,7 @@ def __init__( memory_efficient: bool = False, ) -> None: - super(DenseNet, self).__init__() + super().__init__() # First convolution self.features = nn.Sequential( diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py index 1bb1be347f9..e5909bcaa8e 100644 --- a/torchvision/models/detection/_utils.py +++ b/torchvision/models/detection/_utils.py @@ -7,7 +7,7 @@ from torchvision.ops.misc import FrozenBatchNorm2d -class BalancedPositiveNegativeSampler(object): +class BalancedPositiveNegativeSampler: """ This class samples batches, ensuring that they contain a fixed proportion of positives """ @@ -118,7 +118,7 @@ def encode_boxes(reference_boxes: Tensor, proposals: Tensor, weights: Tensor) -> return targets -class BoxCoder(object): +class BoxCoder: """ This class encodes and decodes a set of bounding boxes into the representation used for training the regressors. @@ -217,7 +217,7 @@ def decode_single(self, rel_codes: Tensor, boxes: Tensor) -> Tensor: return pred_boxes -class Matcher(object): +class Matcher: """ This class assigns to each predicted "element" (e.g., a box) a ground-truth element. 
Each predicted element will have exactly zero or one matches; each diff --git a/torchvision/models/detection/anchor_utils.py b/torchvision/models/detection/anchor_utils.py index 2e433958715..39ffc6c4a9b 100644 --- a/torchvision/models/detection/anchor_utils.py +++ b/torchvision/models/detection/anchor_utils.py @@ -37,7 +37,7 @@ def __init__( sizes=((128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),), ): - super(AnchorGenerator, self).__init__() + super().__init__() if not isinstance(sizes[0], (list, tuple)): # TODO change this @@ -216,7 +216,7 @@ def _grid_default_boxes( for k, f_k in enumerate(grid_sizes): # Now add the default boxes for each width-height pair if self.steps is not None: - x_f_k, y_f_k = [img_shape / self.steps[k] for img_shape in image_size] + x_f_k, y_f_k = (img_shape / self.steps[k] for img_shape in image_size) else: y_f_k, x_f_k = f_k diff --git a/torchvision/models/detection/backbone_utils.py b/torchvision/models/detection/backbone_utils.py index 70a7b40bd50..6cc325f60e2 100644 --- a/torchvision/models/detection/backbone_utils.py +++ b/torchvision/models/detection/backbone_utils.py @@ -29,7 +29,7 @@ class BackboneWithFPN(nn.Module): """ def __init__(self, backbone, return_layers, in_channels_list, out_channels, extra_blocks=None): - super(BackboneWithFPN, self).__init__() + super().__init__() if extra_blocks is None: extra_blocks = LastLevelMaxPool() diff --git a/torchvision/models/detection/faster_rcnn.py b/torchvision/models/detection/faster_rcnn.py index 02da39e8c73..c6499420930 100644 --- a/torchvision/models/detection/faster_rcnn.py +++ b/torchvision/models/detection/faster_rcnn.py @@ -252,7 +252,7 @@ def __init__( image_std = [0.229, 0.224, 0.225] transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std) - super(FasterRCNN, self).__init__(backbone, rpn, roi_heads, transform) + super().__init__(backbone, rpn, roi_heads, transform) class TwoMLPHead(nn.Module): @@ -265,7 +265,7 @@ class TwoMLPHead(nn.Module): """ def __init__(self, in_channels, representation_size): - super(TwoMLPHead, self).__init__() + super().__init__() self.fc6 = nn.Linear(in_channels, representation_size) self.fc7 = nn.Linear(representation_size, representation_size) @@ -290,7 +290,7 @@ class FastRCNNPredictor(nn.Module): """ def __init__(self, in_channels, num_classes): - super(FastRCNNPredictor, self).__init__() + super().__init__() self.cls_score = nn.Linear(in_channels, num_classes) self.bbox_pred = nn.Linear(in_channels, num_classes * 4) @@ -429,7 +429,7 @@ def _fasterrcnn_mobilenet_v3_large_fpn( ) if pretrained: if model_urls.get(weights_name, None) is None: - raise ValueError("No checkpoint is available for model {}".format(weights_name)) + raise ValueError(f"No checkpoint is available for model {weights_name}") state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress) model.load_state_dict(state_dict) return model diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index c77c892e63e..3a65e21ed5b 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -24,7 +24,7 @@ class GeneralizedRCNN(nn.Module): """ def __init__(self, backbone, rpn, roi_heads, transform): - super(GeneralizedRCNN, self).__init__() + super().__init__() self.transform = transform self.backbone = backbone self.rpn = rpn diff --git a/torchvision/models/detection/image_list.py b/torchvision/models/detection/image_list.py index 58f32662b1a..583866557e4 
100644 --- a/torchvision/models/detection/image_list.py +++ b/torchvision/models/detection/image_list.py @@ -4,7 +4,7 @@ from torch import Tensor -class ImageList(object): +class ImageList: """ Structure that holds a list of images (of possibly varying sizes) as a single tensor. diff --git a/torchvision/models/detection/keypoint_rcnn.py b/torchvision/models/detection/keypoint_rcnn.py index 7cd975ea6a0..8cfe307f815 100644 --- a/torchvision/models/detection/keypoint_rcnn.py +++ b/torchvision/models/detection/keypoint_rcnn.py @@ -210,7 +210,7 @@ def __init__( keypoint_dim_reduced = 512 # == keypoint_layers[-1] keypoint_predictor = KeypointRCNNPredictor(keypoint_dim_reduced, num_keypoints) - super(KeypointRCNN, self).__init__( + super().__init__( backbone, num_classes, # transform parameters @@ -258,7 +258,7 @@ def __init__(self, in_channels, layers): d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1)) d.append(nn.ReLU(inplace=True)) next_feature = out_channels - super(KeypointRCNNHeads, self).__init__(*d) + super().__init__(*d) for m in self.children(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") @@ -267,7 +267,7 @@ def __init__(self, in_channels, layers): class KeypointRCNNPredictor(nn.Module): def __init__(self, in_channels, num_keypoints): - super(KeypointRCNNPredictor, self).__init__() + super().__init__() input_features = in_channels deconv_kernel = 4 self.kps_score_lowres = nn.ConvTranspose2d( diff --git a/torchvision/models/detection/mask_rcnn.py b/torchvision/models/detection/mask_rcnn.py index 6b8208b19d8..64df0abd1c0 100644 --- a/torchvision/models/detection/mask_rcnn.py +++ b/torchvision/models/detection/mask_rcnn.py @@ -210,7 +210,7 @@ def __init__( mask_dim_reduced = 256 mask_predictor = MaskRCNNPredictor(mask_predictor_in_channels, mask_dim_reduced, num_classes) - super(MaskRCNN, self).__init__( + super().__init__( backbone, num_classes, # transform parameters @@ -261,13 +261,13 @@ def __init__(self, in_channels, layers, dilation): d = OrderedDict() next_feature = in_channels for layer_idx, layer_features in enumerate(layers, 1): - d["mask_fcn{}".format(layer_idx)] = nn.Conv2d( + d[f"mask_fcn{layer_idx}"] = nn.Conv2d( next_feature, layer_features, kernel_size=3, stride=1, padding=dilation, dilation=dilation ) - d["relu{}".format(layer_idx)] = nn.ReLU(inplace=True) + d[f"relu{layer_idx}"] = nn.ReLU(inplace=True) next_feature = layer_features - super(MaskRCNNHeads, self).__init__(d) + super().__init__(d) for name, param in self.named_parameters(): if "weight" in name: nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu") @@ -277,7 +277,7 @@ def __init__(self, in_channels, layers, dilation): class MaskRCNNPredictor(nn.Sequential): def __init__(self, in_channels, dim_reduced, num_classes): - super(MaskRCNNPredictor, self).__init__( + super().__init__( OrderedDict( [ ("conv5_mask", nn.ConvTranspose2d(in_channels, dim_reduced, 2, 2, 0)), diff --git a/torchvision/models/detection/roi_heads.py b/torchvision/models/detection/roi_heads.py index 35aee4b7d54..b7bbb81111e 100644 --- a/torchvision/models/detection/roi_heads.py +++ b/torchvision/models/detection/roi_heads.py @@ -517,7 +517,7 @@ def __init__( keypoint_head=None, keypoint_predictor=None, ): - super(RoIHeads, self).__init__() + super().__init__() self.box_similarity = box_ops.box_iou # assign ground-truth boxes for each proposal diff --git a/torchvision/models/detection/rpn.py b/torchvision/models/detection/rpn.py index 
c58e1a37af0..64f9e15105b 100644 --- a/torchvision/models/detection/rpn.py +++ b/torchvision/models/detection/rpn.py @@ -34,7 +34,7 @@ class RPNHead(nn.Module): """ def __init__(self, in_channels, num_anchors): - super(RPNHead, self).__init__() + super().__init__() self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1) self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1, stride=1) @@ -137,7 +137,7 @@ def __init__( nms_thresh, score_thresh=0.0, ): - super(RegionProposalNetwork, self).__init__() + super().__init__() self.anchor_generator = anchor_generator self.head = head self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0)) diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index c7d74b4e1af..543b5ca2562 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -626,7 +626,7 @@ def ssd300_vgg16( if pretrained: weights_name = "ssd300_vgg16_coco" if model_urls.get(weights_name, None) is None: - raise ValueError("No checkpoint is available for model {}".format(weights_name)) + raise ValueError(f"No checkpoint is available for model {weights_name}") state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress) model.load_state_dict(state_dict) return model diff --git a/torchvision/models/detection/ssdlite.py b/torchvision/models/detection/ssdlite.py index 503b69d7380..e3b1d5a5588 100644 --- a/torchvision/models/detection/ssdlite.py +++ b/torchvision/models/detection/ssdlite.py @@ -274,7 +274,7 @@ def ssdlite320_mobilenet_v3_large( if pretrained: weights_name = "ssdlite320_mobilenet_v3_large_coco" if model_urls.get(weights_name, None) is None: - raise ValueError("No checkpoint is available for model {}".format(weights_name)) + raise ValueError(f"No checkpoint is available for model {weights_name}") state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress) model.load_state_dict(state_dict) return model diff --git a/torchvision/models/detection/transform.py b/torchvision/models/detection/transform.py index e4a1134b85c..f14e86f3bbe 100644 --- a/torchvision/models/detection/transform.py +++ b/torchvision/models/detection/transform.py @@ -92,7 +92,7 @@ def __init__( size_divisible: int = 32, fixed_size: Optional[Tuple[int, int]] = None, ): - super(GeneralizedRCNNTransform, self).__init__() + super().__init__() if not isinstance(min_size, (list, tuple)): min_size = (min_size,) self.min_size = min_size @@ -264,8 +264,8 @@ def postprocess( def __repr__(self) -> str: format_string = self.__class__.__name__ + "(" _indent = "\n " - format_string += "{0}Normalize(mean={1}, std={2})".format(_indent, self.image_mean, self.image_std) - format_string += "{0}Resize(min_size={1}, max_size={2}, mode='bilinear')".format( + format_string += f"{_indent}Normalize(mean={self.image_mean}, std={self.image_std})" + format_string += "{}Resize(min_size={}, max_size={}, mode='bilinear')".format( _indent, self.min_size, self.max_size ) format_string += "\n)" diff --git a/torchvision/models/efficientnet.py b/torchvision/models/efficientnet.py index b9a5913ea77..f504a3484e8 100644 --- a/torchvision/models/efficientnet.py +++ b/torchvision/models/efficientnet.py @@ -195,7 +195,7 @@ def __init__( ) # building inverted residual blocks - total_stage_blocks = sum([cnf.num_layers for cnf in inverted_residual_setting]) + total_stage_blocks = sum(cnf.num_layers for cnf in 
inverted_residual_setting) stage_block_id = 0 for cnf in inverted_residual_setting: stage: List[nn.Module] = [] @@ -288,7 +288,7 @@ def _efficientnet_model( model = EfficientNet(inverted_residual_setting, dropout, **kwargs) if pretrained: if model_urls.get(arch, None) is None: - raise ValueError("No checkpoint is available for model type {}".format(arch)) + raise ValueError(f"No checkpoint is available for model type {arch}") state_dict = load_state_dict_from_url(model_urls[arch], progress=progress) model.load_state_dict(state_dict) return model diff --git a/torchvision/models/feature_extraction.py b/torchvision/models/feature_extraction.py index c2674cb83a1..114fbf855bc 100644 --- a/torchvision/models/feature_extraction.py +++ b/torchvision/models/feature_extraction.py @@ -26,7 +26,7 @@ def __init__(self, *args, **kwargs): if "leaf_modules" in kwargs: leaf_modules = kwargs.pop("leaf_modules") self.leaf_modules = leaf_modules - super(LeafModuleAwareTracer, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def is_leaf_module(self, m: nn.Module, module_qualname: str) -> bool: if isinstance(m, tuple(self.leaf_modules)): @@ -54,7 +54,7 @@ class NodePathTracer(LeafModuleAwareTracer): """ def __init__(self, *args, **kwargs): - super(NodePathTracer, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) # Track the qualified name of the Node being traced self.current_module_qualname = "" # A map from FX Node to the qualified name diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py index fb4dcfae956..9e4e4107e4f 100644 --- a/torchvision/models/googlenet.py +++ b/torchvision/models/googlenet.py @@ -74,7 +74,7 @@ def __init__( dropout: float = 0.2, dropout_aux: float = 0.7, ) -> None: - super(GoogLeNet, self).__init__() + super().__init__() if blocks is None: blocks = [BasicConv2d, Inception, InceptionAux] if init_weights is None: @@ -229,7 +229,7 @@ def __init__( pool_proj: int, conv_block: Optional[Callable[..., nn.Module]] = None, ) -> None: - super(Inception, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1) @@ -272,7 +272,7 @@ def __init__( conv_block: Optional[Callable[..., nn.Module]] = None, dropout: float = 0.7, ) -> None: - super(InceptionAux, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.conv = conv_block(in_channels, 128, kernel_size=1) @@ -301,7 +301,7 @@ def forward(self, x: Tensor) -> Tensor: class BasicConv2d(nn.Module): def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None: - super(BasicConv2d, self).__init__() + super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) self.bn = nn.BatchNorm2d(out_channels, eps=0.001) diff --git a/torchvision/models/inception.py b/torchvision/models/inception.py index 562d5e10a9a..39313e763e0 100644 --- a/torchvision/models/inception.py +++ b/torchvision/models/inception.py @@ -72,7 +72,7 @@ def __init__( init_weights: Optional[bool] = None, dropout: float = 0.5, ) -> None: - super(Inception3, self).__init__() + super().__init__() if inception_blocks is None: inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux] if init_weights is None: @@ -212,7 +212,7 @@ class InceptionA(nn.Module): def __init__( self, in_channels: int, pool_features: int, conv_block: Optional[Callable[..., nn.Module]] = None ) -> None: - super(InceptionA, self).__init__()
+ super().__init__() if conv_block is None: conv_block = BasicConv2d self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) @@ -249,7 +249,7 @@ def forward(self, x: Tensor) -> Tensor: class InceptionB(nn.Module): def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None: - super(InceptionB, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) @@ -279,7 +279,7 @@ class InceptionC(nn.Module): def __init__( self, in_channels: int, channels_7x7: int, conv_block: Optional[Callable[..., nn.Module]] = None ) -> None: - super(InceptionC, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) @@ -323,7 +323,7 @@ def forward(self, x: Tensor) -> Tensor: class InceptionD(nn.Module): def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None: - super(InceptionD, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) @@ -354,7 +354,7 @@ def forward(self, x: Tensor) -> Tensor: class InceptionE(nn.Module): def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None: - super(InceptionE, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) @@ -403,7 +403,7 @@ class InceptionAux(nn.Module): def __init__( self, in_channels: int, num_classes: int, conv_block: Optional[Callable[..., nn.Module]] = None ) -> None: - super(InceptionAux, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.conv0 = conv_block(in_channels, 128, kernel_size=1) @@ -432,7 +432,7 @@ def forward(self, x: Tensor) -> Tensor: class BasicConv2d(nn.Module): def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None: - super(BasicConv2d, self).__init__() + super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) self.bn = nn.BatchNorm2d(out_channels, eps=0.001) diff --git a/torchvision/models/mnasnet.py b/torchvision/models/mnasnet.py index 3f48f82c41e..32d231b1f8d 100644 --- a/torchvision/models/mnasnet.py +++ b/torchvision/models/mnasnet.py @@ -25,7 +25,7 @@ class _InvertedResidual(nn.Module): def __init__( self, in_ch: int, out_ch: int, kernel_size: int, stride: int, expansion_factor: int, bn_momentum: float = 0.1 ) -> None: - super(_InvertedResidual, self).__init__() + super().__init__() assert stride in [1, 2] assert kernel_size in [3, 5] mid_ch = in_ch * expansion_factor @@ -96,7 +96,7 @@ class MNASNet(torch.nn.Module): _version = 2 def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None: - super(MNASNet, self).__init__() + super().__init__() assert alpha > 0.0 self.alpha = alpha self.num_classes = num_classes @@ -191,14 +191,14 @@ def _load_from_state_dict( UserWarning, ) - super(MNASNet, self)._load_from_state_dict( + super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def _load_pretrained(model_name: str, model: nn.Module, progress: bool) -> None: if model_name not in _MODEL_URLS or _MODEL_URLS[model_name] is None: - raise ValueError("No checkpoint is available for model type {}".format(model_name)) + raise ValueError(f"No checkpoint is available 
for model type {model_name}") checkpoint_url = _MODEL_URLS[model_name] model.load_state_dict(load_state_dict_from_url(checkpoint_url, progress=progress)) diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py index 031faa9d572..8e7359d5a10 100644 --- a/torchvision/models/mobilenetv2.py +++ b/torchvision/models/mobilenetv2.py @@ -41,7 +41,7 @@ class InvertedResidual(nn.Module): def __init__( self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[..., nn.Module]] = None ) -> None: - super(InvertedResidual, self).__init__() + super().__init__() self.stride = stride assert stride in [1, 2] @@ -109,7 +109,7 @@ def __init__( dropout (float): The droupout probability """ - super(MobileNetV2, self).__init__() + super().__init__() if block is None: block = InvertedResidual diff --git a/torchvision/models/mobilenetv3.py b/torchvision/models/mobilenetv3.py index 5689b896d16..f44ecf6ac6a 100644 --- a/torchvision/models/mobilenetv3.py +++ b/torchvision/models/mobilenetv3.py @@ -276,7 +276,7 @@ def _mobilenet_v3_conf( ] last_channel = adjust_channels(1024 // reduce_divider) # C5 else: - raise ValueError("Unsupported model type {}".format(arch)) + raise ValueError(f"Unsupported model type {arch}") return inverted_residual_setting, last_channel @@ -292,7 +292,7 @@ def _mobilenet_v3_model( model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs) if pretrained: if model_urls.get(arch, None) is None: - raise ValueError("No checkpoint is available for model type {}".format(arch)) + raise ValueError(f"No checkpoint is available for model type {arch}") state_dict = load_state_dict_from_url(model_urls[arch], progress=progress) model.load_state_dict(state_dict) return model diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py index 9090564af17..d585a1cd4ad 100644 --- a/torchvision/models/quantization/googlenet.py +++ b/torchvision/models/quantization/googlenet.py @@ -84,7 +84,7 @@ def googlenet( class QuantizableBasicConv2d(BasicConv2d): def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableBasicConv2d, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.relu = nn.ReLU() def forward(self, x: Tensor) -> Tensor: @@ -99,9 +99,7 @@ def fuse_model(self) -> None: class QuantizableInception(Inception): def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableInception, self).__init__( # type: ignore[misc] - conv_block=QuantizableBasicConv2d, *args, **kwargs - ) + super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] self.cat = nn.quantized.FloatFunctional() def forward(self, x: Tensor) -> Tensor: @@ -112,9 +110,7 @@ def forward(self, x: Tensor) -> Tensor: class QuantizableInceptionAux(InceptionAux): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableInceptionAux, self).__init__( # type: ignore[misc] - conv_block=QuantizableBasicConv2d, *args, **kwargs - ) + super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] self.relu = nn.ReLU() def forward(self, x: Tensor) -> Tensor: @@ -138,7 +134,7 @@ def forward(self, x: Tensor) -> Tensor: class QuantizableGoogLeNet(GoogLeNet): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableGoogLeNet, 
self).__init__( # type: ignore[misc] + super().__init__( # type: ignore[misc] blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], *args, **kwargs ) self.quant = torch.quantization.QuantStub() diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py index ea9326276f2..a161c62fb2e 100644 --- a/torchvision/models/quantization/inception.py +++ b/torchvision/models/quantization/inception.py @@ -92,7 +92,7 @@ def inception_v3( class QuantizableBasicConv2d(inception_module.BasicConv2d): def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableBasicConv2d, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.relu = nn.ReLU() def forward(self, x: Tensor) -> Tensor: @@ -108,9 +108,7 @@ def fuse_model(self) -> None: class QuantizableInceptionA(inception_module.InceptionA): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableInceptionA, self).__init__( # type: ignore[misc] - conv_block=QuantizableBasicConv2d, *args, **kwargs - ) + super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] self.myop = nn.quantized.FloatFunctional() def forward(self, x: Tensor) -> Tensor: @@ -121,9 +119,7 @@ def forward(self, x: Tensor) -> Tensor: class QuantizableInceptionB(inception_module.InceptionB): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableInceptionB, self).__init__( # type: ignore[misc] - conv_block=QuantizableBasicConv2d, *args, **kwargs - ) + super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] self.myop = nn.quantized.FloatFunctional() def forward(self, x: Tensor) -> Tensor: @@ -134,9 +130,7 @@ def forward(self, x: Tensor) -> Tensor: class QuantizableInceptionC(inception_module.InceptionC): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableInceptionC, self).__init__( # type: ignore[misc] - conv_block=QuantizableBasicConv2d, *args, **kwargs - ) + super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] self.myop = nn.quantized.FloatFunctional() def forward(self, x: Tensor) -> Tensor: @@ -147,9 +141,7 @@ def forward(self, x: Tensor) -> Tensor: class QuantizableInceptionD(inception_module.InceptionD): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableInceptionD, self).__init__( # type: ignore[misc] - conv_block=QuantizableBasicConv2d, *args, **kwargs - ) + super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] self.myop = nn.quantized.FloatFunctional() def forward(self, x: Tensor) -> Tensor: @@ -160,9 +152,7 @@ def forward(self, x: Tensor) -> Tensor: class QuantizableInceptionE(inception_module.InceptionE): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableInceptionE, self).__init__( # type: ignore[misc] - conv_block=QuantizableBasicConv2d, *args, **kwargs - ) + super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] 
self.myop1 = nn.quantized.FloatFunctional() self.myop2 = nn.quantized.FloatFunctional() self.myop3 = nn.quantized.FloatFunctional() @@ -196,9 +186,7 @@ def forward(self, x: Tensor) -> Tensor: class QuantizableInceptionAux(inception_module.InceptionAux): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableInceptionAux, self).__init__( # type: ignore[misc] - conv_block=QuantizableBasicConv2d, *args, **kwargs - ) + super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] class QuantizableInception3(inception_module.Inception3): @@ -208,7 +196,7 @@ def __init__( aux_logits: bool = True, transform_input: bool = False, ) -> None: - super(QuantizableInception3, self).__init__( + super().__init__( num_classes=num_classes, aux_logits=aux_logits, transform_input=transform_input, diff --git a/torchvision/models/quantization/mobilenetv2.py b/torchvision/models/quantization/mobilenetv2.py index 8626de19aab..faa63e73be5 100644 --- a/torchvision/models/quantization/mobilenetv2.py +++ b/torchvision/models/quantization/mobilenetv2.py @@ -19,7 +19,7 @@ class QuantizableInvertedResidual(InvertedResidual): def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableInvertedResidual, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.skip_add = nn.quantized.FloatFunctional() def forward(self, x: Tensor) -> Tensor: @@ -42,7 +42,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: Args: Inherits args from floating point MobileNetV2 """ - super(QuantizableMobileNetV2, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.quant = QuantStub() self.dequant = DeQuantStub() diff --git a/torchvision/models/quantization/mobilenetv3.py b/torchvision/models/quantization/mobilenetv3.py index e19912e25b0..948b72ead7d 100644 --- a/torchvision/models/quantization/mobilenetv3.py +++ b/torchvision/models/quantization/mobilenetv3.py @@ -110,7 +110,7 @@ def fuse_model(self) -> None: def _load_weights(arch: str, model: QuantizableMobileNetV3, model_url: Optional[str], progress: bool) -> None: if model_url is None: - raise ValueError("No checkpoint is available for {}".format(arch)) + raise ValueError(f"No checkpoint is available for {arch}") state_dict = load_state_dict_from_url(model_url, progress=progress) model.load_state_dict(state_dict) diff --git a/torchvision/models/quantization/resnet.py b/torchvision/models/quantization/resnet.py index 596ae56d85b..1d09bf896c9 100644 --- a/torchvision/models/quantization/resnet.py +++ b/torchvision/models/quantization/resnet.py @@ -21,7 +21,7 @@ class QuantizableBasicBlock(BasicBlock): def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableBasicBlock, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.add_relu = torch.nn.quantized.FloatFunctional() def forward(self, x: Tensor) -> Tensor: @@ -49,7 +49,7 @@ def fuse_model(self) -> None: class QuantizableBottleneck(Bottleneck): def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableBottleneck, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.skip_add_relu = nn.quantized.FloatFunctional() self.relu1 = nn.ReLU(inplace=False) self.relu2 = nn.ReLU(inplace=False) @@ -80,7 +80,7 @@ def fuse_model(self) -> None: class QuantizableResNet(ResNet): def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableResNet, self).__init__(*args, 
**kwargs) + super().__init__(*args, **kwargs) self.quant = torch.quantization.QuantStub() self.dequant = torch.quantization.DeQuantStub() diff --git a/torchvision/models/quantization/shufflenetv2.py b/torchvision/models/quantization/shufflenetv2.py index 76920433399..c3b79b57c02 100644 --- a/torchvision/models/quantization/shufflenetv2.py +++ b/torchvision/models/quantization/shufflenetv2.py @@ -26,7 +26,7 @@ class QuantizableInvertedResidual(shufflenetv2.InvertedResidual): def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableInvertedResidual, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.cat = nn.quantized.FloatFunctional() def forward(self, x: Tensor) -> Tensor: @@ -44,9 +44,7 @@ def forward(self, x: Tensor) -> Tensor: class QuantizableShuffleNetV2(shufflenetv2.ShuffleNetV2): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: - super(QuantizableShuffleNetV2, self).__init__( # type: ignore[misc] - *args, inverted_residual=QuantizableInvertedResidual, **kwargs - ) + super().__init__(*args, inverted_residual=QuantizableInvertedResidual, **kwargs) # type: ignore[misc] self.quant = torch.quantization.QuantStub() self.dequant = torch.quantization.DeQuantStub() diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py index 7584ebb98ea..cfbb09dea67 100644 --- a/torchvision/models/resnet.py +++ b/torchvision/models/resnet.py @@ -67,7 +67,7 @@ def __init__( dilation: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None, ) -> None: - super(BasicBlock, self).__init__() + super().__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: @@ -122,7 +122,7 @@ def __init__( dilation: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None, ) -> None: - super(Bottleneck, self).__init__() + super().__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d width = int(planes * (base_width / 64.0)) * groups @@ -172,7 +172,7 @@ def __init__( replace_stride_with_dilation: Optional[List[bool]] = None, norm_layer: Optional[Callable[..., nn.Module]] = None, ) -> None: - super(ResNet, self).__init__() + super().__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d self._norm_layer = norm_layer diff --git a/torchvision/models/segmentation/_utils.py b/torchvision/models/segmentation/_utils.py index 0e9a9477838..057d295e22f 100644 --- a/torchvision/models/segmentation/_utils.py +++ b/torchvision/models/segmentation/_utils.py @@ -9,7 +9,7 @@ class _SimpleSegmentationModel(nn.Module): __constants__ = ["aux_classifier"] def __init__(self, backbone: nn.Module, classifier: nn.Module, aux_classifier: Optional[nn.Module] = None) -> None: - super(_SimpleSegmentationModel, self).__init__() + super().__init__() self.backbone = backbone self.classifier = classifier self.aux_classifier = aux_classifier diff --git a/torchvision/models/segmentation/deeplabv3.py b/torchvision/models/segmentation/deeplabv3.py index a8f06bd89bd..faac05a6c71 100644 --- a/torchvision/models/segmentation/deeplabv3.py +++ b/torchvision/models/segmentation/deeplabv3.py @@ -31,7 +31,7 @@ class DeepLabV3(_SimpleSegmentationModel): class DeepLabHead(nn.Sequential): def __init__(self, in_channels: int, num_classes: int) -> None: - super(DeepLabHead, self).__init__( + super().__init__( ASPP(in_channels, [12, 24, 36]), nn.Conv2d(256, 256, 3, padding=1, bias=False), nn.BatchNorm2d(256), @@ -47,12 +47,12 @@ def __init__(self, 
in_channels: int, out_channels: int, dilation: int) -> None: nn.BatchNorm2d(out_channels), nn.ReLU(), ] - super(ASPPConv, self).__init__(*modules) + super().__init__(*modules) class ASPPPooling(nn.Sequential): def __init__(self, in_channels: int, out_channels: int) -> None: - super(ASPPPooling, self).__init__( + super().__init__( nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels), @@ -68,7 +68,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class ASPP(nn.Module): def __init__(self, in_channels: int, atrous_rates: List[int], out_channels: int = 256) -> None: - super(ASPP, self).__init__() + super().__init__() modules = [] modules.append( nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU()) diff --git a/torchvision/models/segmentation/fcn.py b/torchvision/models/segmentation/fcn.py index 6a935e9ac48..00138b796b9 100644 --- a/torchvision/models/segmentation/fcn.py +++ b/torchvision/models/segmentation/fcn.py @@ -34,4 +34,4 @@ def __init__(self, in_channels: int, channels: int) -> None: nn.Conv2d(inter_channels, channels, 1), ] - super(FCNHead, self).__init__(*layers) + super().__init__(*layers) diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py index c19e36e4705..404e39fee01 100644 --- a/torchvision/models/segmentation/segmentation.py +++ b/torchvision/models/segmentation/segmentation.py @@ -55,7 +55,7 @@ def _segm_model( aux_layer = str(aux_pos) aux_inplanes = backbone[aux_pos].out_channels else: - raise NotImplementedError("backbone {} is not supported as of now".format(backbone_name)) + raise NotImplementedError(f"backbone {backbone_name} is not supported as of now") return_layers = {out_layer: "out"} if aux: @@ -99,7 +99,7 @@ def _load_weights(model: nn.Module, arch_type: str, backbone: str, progress: boo arch = arch_type + "_" + backbone + "_coco" model_url = model_urls.get(arch, None) if model_url is None: - raise NotImplementedError("pretrained {} is not supported as of now".format(arch)) + raise NotImplementedError(f"pretrained {arch} is not supported as of now") else: state_dict = load_state_dict_from_url(model_url, progress=progress) model.load_state_dict(state_dict) diff --git a/torchvision/models/shufflenetv2.py b/torchvision/models/shufflenetv2.py index a9bb58fc9d1..e3c59fdd1c9 100644 --- a/torchvision/models/shufflenetv2.py +++ b/torchvision/models/shufflenetv2.py @@ -34,7 +34,7 @@ def channel_shuffle(x: Tensor, groups: int) -> Tensor: class InvertedResidual(nn.Module): def __init__(self, inp: int, oup: int, stride: int) -> None: - super(InvertedResidual, self).__init__() + super().__init__() if not (1 <= stride <= 3): raise ValueError("illegal stride value") @@ -98,7 +98,7 @@ def __init__( num_classes: int = 1000, inverted_residual: Callable[..., nn.Module] = InvertedResidual, ) -> None: - super(ShuffleNetV2, self).__init__() + super().__init__() if len(stages_repeats) != 3: raise ValueError("expected stages_repeats as list of 3 positive ints") @@ -121,7 +121,7 @@ def __init__( self.stage2: nn.Sequential self.stage3: nn.Sequential self.stage4: nn.Sequential - stage_names = ["stage{}".format(i) for i in [2, 3, 4]] + stage_names = [f"stage{i}" for i in [2, 3, 4]] for name, repeats, output_channels in zip(stage_names, stages_repeats, self._stage_out_channels[1:]): seq = [inverted_residual(input_channels, output_channels, 2)] for i in range(repeats - 1): @@ -160,7 +160,7 @@ def _shufflenetv2(arch: str, 
pretrained: bool, progress: bool, *args: Any, **kwa if pretrained: model_url = model_urls[arch] if model_url is None: - raise NotImplementedError("pretrained {} is not supported as of now".format(arch)) + raise NotImplementedError(f"pretrained {arch} is not supported as of now") else: state_dict = load_state_dict_from_url(model_url, progress=progress) model.load_state_dict(state_dict) diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py index c4a3265211f..ef499c696bb 100644 --- a/torchvision/models/squeezenet.py +++ b/torchvision/models/squeezenet.py @@ -16,7 +16,7 @@ class Fire(nn.Module): def __init__(self, inplanes: int, squeeze_planes: int, expand1x1_planes: int, expand3x3_planes: int) -> None: - super(Fire, self).__init__() + super().__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) @@ -34,7 +34,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class SqueezeNet(nn.Module): def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float = 0.5) -> None: - super(SqueezeNet, self).__init__() + super().__init__() self.num_classes = num_classes if version == "1_0": self.features = nn.Sequential( diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py index 93b626c7d43..08f4a6d3c30 100644 --- a/torchvision/models/vgg.py +++ b/torchvision/models/vgg.py @@ -35,7 +35,7 @@ class VGG(nn.Module): def __init__( self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True, dropout: float = 0.5 ) -> None: - super(VGG, self).__init__() + super().__init__() self.features = features self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( diff --git a/torchvision/models/video/resnet.py b/torchvision/models/video/resnet.py index 5cfbbaeb559..e8a312fc44b 100644 --- a/torchvision/models/video/resnet.py +++ b/torchvision/models/video/resnet.py @@ -20,7 +20,7 @@ def __init__( self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1 ) -> None: - super(Conv3DSimple, self).__init__( + super().__init__( in_channels=in_planes, out_channels=out_planes, kernel_size=(3, 3, 3), @@ -36,7 +36,7 @@ def get_downsample_stride(stride: int) -> Tuple[int, int, int]: class Conv2Plus1D(nn.Sequential): def __init__(self, in_planes: int, out_planes: int, midplanes: int, stride: int = 1, padding: int = 1) -> None: - super(Conv2Plus1D, self).__init__( + super().__init__( nn.Conv3d( in_planes, midplanes, @@ -62,7 +62,7 @@ def __init__( self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1 ) -> None: - super(Conv3DNoTemporal, self).__init__( + super().__init__( in_channels=in_planes, out_channels=out_planes, kernel_size=(1, 3, 3), @@ -90,7 +90,7 @@ def __init__( ) -> None: midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes) - super(BasicBlock, self).__init__() + super().__init__() self.conv1 = nn.Sequential( conv_builder(inplanes, planes, midplanes, stride), nn.BatchNorm3d(planes), nn.ReLU(inplace=True) ) @@ -125,7 +125,7 @@ def __init__( downsample: Optional[nn.Module] = None, ) -> None: - super(Bottleneck, self).__init__() + super().__init__() midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes) # 1x1x1 @@ -166,7 +166,7 @@ class BasicStem(nn.Sequential): """The default conv-batchnorm-relu stem""" def __init__(self) -> None: - super(BasicStem, self).__init__( + super().__init__( 
nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), padding=(1, 3, 3), bias=False), nn.BatchNorm3d(64), nn.ReLU(inplace=True), @@ -177,7 +177,7 @@ class R2Plus1dStem(nn.Sequential): """R(2+1)D stem is different than the default one as it uses separated 3D convolution""" def __init__(self) -> None: - super(R2Plus1dStem, self).__init__( + super().__init__( nn.Conv3d(3, 45, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False), nn.BatchNorm3d(45), nn.ReLU(inplace=True), @@ -208,7 +208,7 @@ def __init__( num_classes (int, optional): Dimension of the final FC layer. Defaults to 400. zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False. """ - super(VideoResNet, self).__init__() + super().__init__() self.inplanes = 64 self.stem = stem() diff --git a/torchvision/ops/deform_conv.py b/torchvision/ops/deform_conv.py index 550f659b07d..679b413bd8d 100644 --- a/torchvision/ops/deform_conv.py +++ b/torchvision/ops/deform_conv.py @@ -122,7 +122,7 @@ def __init__( groups: int = 1, bias: bool = True, ): - super(DeformConv2d, self).__init__() + super().__init__() if in_channels % groups != 0: raise ValueError("in_channels must be divisible by groups") diff --git a/torchvision/ops/feature_pyramid_network.py b/torchvision/ops/feature_pyramid_network.py index 9e10613b630..8b65bf7fc22 100644 --- a/torchvision/ops/feature_pyramid_network.py +++ b/torchvision/ops/feature_pyramid_network.py @@ -74,7 +74,7 @@ def __init__( out_channels: int, extra_blocks: Optional[ExtraFPNBlock] = None, ): - super(FeaturePyramidNetwork, self).__init__() + super().__init__() self.inner_blocks = nn.ModuleList() self.layer_blocks = nn.ModuleList() for in_channels in in_channels_list: @@ -180,7 +180,7 @@ class LastLevelP6P7(ExtraFPNBlock): """ def __init__(self, in_channels: int, out_channels: int): - super(LastLevelP6P7, self).__init__() + super().__init__() self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) for module in [self.p6, self.p7]: diff --git a/torchvision/ops/misc.py b/torchvision/ops/misc.py index 3df290bc6c5..caf0d999f77 100644 --- a/torchvision/ops/misc.py +++ b/torchvision/ops/misc.py @@ -65,7 +65,7 @@ def __init__( if n is not None: warnings.warn("`n` argument is deprecated and has been renamed `num_features`", DeprecationWarning) num_features = n - super(FrozenBatchNorm2d, self).__init__() + super().__init__() self.eps = eps self.register_buffer("weight", torch.ones(num_features)) self.register_buffer("bias", torch.zeros(num_features)) @@ -86,7 +86,7 @@ def _load_from_state_dict( if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] - super(FrozenBatchNorm2d, self)._load_from_state_dict( + super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py index 6d2388c74b3..b3c4924f12e 100644 --- a/torchvision/ops/poolers.py +++ b/torchvision/ops/poolers.py @@ -42,7 +42,7 @@ def initLevelMapper( return LevelMapper(k_min, k_max, canonical_scale, canonical_level, eps) -class LevelMapper(object): +class LevelMapper: """Determine which FPN level each RoI in a set of RoIs should map to based on the heuristic in the FPN paper. 
@@ -129,7 +129,7 @@ def __init__( canonical_scale: int = 224, canonical_level: int = 4, ): - super(MultiScaleRoIAlign, self).__init__() + super().__init__() if isinstance(output_size, int): output_size = (output_size, output_size) self.featmap_names = featmap_names diff --git a/torchvision/ops/ps_roi_align.py b/torchvision/ops/ps_roi_align.py index 0bfeefc867b..264ae352483 100644 --- a/torchvision/ops/ps_roi_align.py +++ b/torchvision/ops/ps_roi_align.py @@ -65,7 +65,7 @@ def __init__( spatial_scale: float, sampling_ratio: int, ): - super(PSRoIAlign, self).__init__() + super().__init__() self.output_size = output_size self.spatial_scale = spatial_scale self.sampling_ratio = sampling_ratio diff --git a/torchvision/ops/ps_roi_pool.py b/torchvision/ops/ps_roi_pool.py index cde3543300c..cfc33b60428 100644 --- a/torchvision/ops/ps_roi_pool.py +++ b/torchvision/ops/ps_roi_pool.py @@ -52,7 +52,7 @@ class PSRoIPool(nn.Module): """ def __init__(self, output_size: int, spatial_scale: float): - super(PSRoIPool, self).__init__() + super().__init__() self.output_size = output_size self.spatial_scale = spatial_scale diff --git a/torchvision/ops/roi_align.py b/torchvision/ops/roi_align.py index 1178e8cd52c..e686c1c5210 100644 --- a/torchvision/ops/roi_align.py +++ b/torchvision/ops/roi_align.py @@ -72,7 +72,7 @@ def __init__( sampling_ratio: int, aligned: bool = False, ): - super(RoIAlign, self).__init__() + super().__init__() self.output_size = output_size self.spatial_scale = spatial_scale self.sampling_ratio = sampling_ratio diff --git a/torchvision/ops/roi_pool.py b/torchvision/ops/roi_pool.py index 5eb19154054..6f8a37f4432 100644 --- a/torchvision/ops/roi_pool.py +++ b/torchvision/ops/roi_pool.py @@ -54,7 +54,7 @@ class RoIPool(nn.Module): """ def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float): - super(RoIPool, self).__init__() + super().__init__() self.output_size = output_size self.spatial_scale = spatial_scale diff --git a/torchvision/ops/stochastic_depth.py b/torchvision/ops/stochastic_depth.py index de120862941..0be179dbe67 100644 --- a/torchvision/ops/stochastic_depth.py +++ b/torchvision/ops/stochastic_depth.py @@ -22,9 +22,9 @@ def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True) Tensor[N, ...]: The randomly zeroed tensor. 
""" if p < 0.0 or p > 1.0: - raise ValueError("drop probability has to be between 0 and 1, but got {}".format(p)) + raise ValueError(f"drop probability has to be between 0 and 1, but got {p}") if mode not in ["batch", "row"]: - raise ValueError("mode has to be either 'batch' or 'row', but got {}".format(mode)) + raise ValueError(f"mode has to be either 'batch' or 'row', but got {mode}") if not training or p == 0.0: return input diff --git a/torchvision/prototype/datasets/utils/_dataset.py b/torchvision/prototype/datasets/utils/_dataset.py index 61e41a061e4..552c7346ce4 100644 --- a/torchvision/prototype/datasets/utils/_dataset.py +++ b/torchvision/prototype/datasets/utils/_dataset.py @@ -117,7 +117,7 @@ def __init__( elif isinstance(categories, int): categories = [str(label) for label in range(categories)] elif isinstance(categories, (str, pathlib.Path)): - with open(pathlib.Path(categories).expanduser().resolve(), "r") as fh: + with open(pathlib.Path(categories).expanduser().resolve()) as fh: categories = [line.strip() for line in fh] self.categories = tuple(categories) diff --git a/torchvision/transforms/_functional_video.py b/torchvision/transforms/_functional_video.py index 2b4fe371b98..56633f9abf6 100644 --- a/torchvision/transforms/_functional_video.py +++ b/torchvision/transforms/_functional_video.py @@ -101,4 +101,4 @@ def hflip(clip): flipped clip (torch.tensor): Size is (C, T, H, W) """ assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor" - return clip.flip((-1)) + return clip.flip(-1) diff --git a/torchvision/transforms/_transforms_video.py b/torchvision/transforms/_transforms_video.py index f5c7836543a..629a42f4d21 100644 --- a/torchvision/transforms/_transforms_video.py +++ b/torchvision/transforms/_transforms_video.py @@ -44,7 +44,7 @@ def __call__(self, clip): return F.crop(clip, i, j, h, w) def __repr__(self): - return self.__class__.__name__ + "(size={0})".format(self.size) + return self.__class__.__name__ + f"(size={self.size})" class RandomResizedCropVideo(RandomResizedCrop): @@ -77,12 +77,12 @@ def __call__(self, clip): return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode) def __repr__(self): - return self.__class__.__name__ + "(size={0}, interpolation_mode={1}, scale={2}, ratio={3})".format( + return self.__class__.__name__ + "(size={}, interpolation_mode={}, scale={}, ratio={})".format( self.size, self.interpolation_mode, self.scale, self.ratio ) -class CenterCropVideo(object): +class CenterCropVideo: def __init__(self, crop_size): if isinstance(crop_size, numbers.Number): self.crop_size = (int(crop_size), int(crop_size)) @@ -100,10 +100,10 @@ def __call__(self, clip): return F.center_crop(clip, self.crop_size) def __repr__(self): - return self.__class__.__name__ + "(crop_size={0})".format(self.crop_size) + return self.__class__.__name__ + f"(crop_size={self.crop_size})" -class NormalizeVideo(object): +class NormalizeVideo: """ Normalize the video clip by mean subtraction and division by standard deviation Args: @@ -125,10 +125,10 @@ def __call__(self, clip): return F.normalize(clip, self.mean, self.std, self.inplace) def __repr__(self): - return self.__class__.__name__ + "(mean={0}, std={1}, inplace={2})".format(self.mean, self.std, self.inplace) + return self.__class__.__name__ + f"(mean={self.mean}, std={self.std}, inplace={self.inplace})" -class ToTensorVideo(object): +class ToTensorVideo: """ Convert tensor data type from uint8 to float, divide value by 255.0 and permute the dimensions of clip tensor @@ -150,7 +150,7 @@ def 
__repr__(self): return self.__class__.__name__ -class RandomHorizontalFlipVideo(object): +class RandomHorizontalFlipVideo: """ Flip the video clip along the horizonal direction with a given probability Args: @@ -172,4 +172,4 @@ def __call__(self, clip): return clip def __repr__(self): - return self.__class__.__name__ + "(p={0})".format(self.p) + return self.__class__.__name__ + f"(p={self.p})" diff --git a/torchvision/transforms/autoaugment.py b/torchvision/transforms/autoaugment.py index f99e0aa2950..e291f0971ea 100644 --- a/torchvision/transforms/autoaugment.py +++ b/torchvision/transforms/autoaugment.py @@ -76,7 +76,7 @@ def _apply_op( elif op_name == "Identity": pass else: - raise ValueError("The provided operator {} is not recognized.".format(op_name)) + raise ValueError(f"The provided operator {op_name} is not recognized.") return img @@ -208,7 +208,7 @@ def _get_policies( (("ShearX", 0.7, 2), ("Invert", 0.1, None)), ] else: - raise ValueError("The provided policy {} is not recognized.".format(policy)) + raise ValueError(f"The provided policy {policy} is not recognized.") def _augmentation_space(self, num_bins: int, image_size: List[int]) -> Dict[str, Tuple[Tensor, bool]]: return { @@ -270,7 +270,7 @@ def forward(self, img: Tensor) -> Tensor: return img def __repr__(self) -> str: - return self.__class__.__name__ + "(policy={}, fill={})".format(self.policy, self.fill) + return self.__class__.__name__ + f"(policy={self.policy}, fill={self.fill})" class RandAugment(torch.nn.Module): diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index 9578134cae0..44abc4de6cf 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -111,10 +111,10 @@ def to_tensor(pic): Tensor: Converted image. """ if not (F_pil._is_pil_image(pic) or _is_numpy(pic)): - raise TypeError("pic should be PIL Image or ndarray. Got {}".format(type(pic))) + raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}") if _is_numpy(pic) and not _is_numpy_image(pic): - raise ValueError("pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndim)) + raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.") default_float_dtype = torch.get_default_dtype() @@ -167,7 +167,7 @@ def pil_to_tensor(pic): Tensor: Converted image. """ if not F_pil._is_pil_image(pic): - raise TypeError("pic should be PIL Image. Got {}".format(type(pic))) + raise TypeError(f"pic should be PIL Image. Got {type(pic)}") if accimage is not None and isinstance(pic, accimage.Image): # accimage format is always uint8 internally, so always return uint8 here @@ -226,11 +226,11 @@ def to_pil_image(pic, mode=None): PIL Image: Image converted to PIL Image. """ if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)): - raise TypeError("pic should be Tensor or ndarray. Got {}.".format(type(pic))) + raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.") elif isinstance(pic, torch.Tensor): if pic.ndimension() not in {2, 3}: - raise ValueError("pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndimension())) + raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndimension()} dimensions.") elif pic.ndimension() == 2: # if 2D image, add channel dimension (CHW) @@ -238,11 +238,11 @@ def to_pil_image(pic, mode=None): # check number of channels if pic.shape[-3] > 4: - raise ValueError("pic should not have > 4 channels. Got {} channels.".format(pic.shape[-3])) + raise ValueError(f"pic should not have > 4 channels. 
Got {pic.shape[-3]} channels.") elif isinstance(pic, np.ndarray): if pic.ndim not in {2, 3}: - raise ValueError("pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndim)) + raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.") elif pic.ndim == 2: # if 2D image, add channel dimension (HWC) @@ -250,7 +250,7 @@ def to_pil_image(pic, mode=None): # check number of channels if pic.shape[-1] > 4: - raise ValueError("pic should not have > 4 channels. Got {} channels.".format(pic.shape[-1])) + raise ValueError(f"pic should not have > 4 channels. Got {pic.shape[-1]} channels.") npimg = pic if isinstance(pic, torch.Tensor): @@ -259,7 +259,7 @@ def to_pil_image(pic, mode=None): npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0)) if not isinstance(npimg, np.ndarray): - raise TypeError("Input pic must be a torch.Tensor or NumPy ndarray, " + "not {}".format(type(npimg))) + raise TypeError("Input pic must be a torch.Tensor or NumPy ndarray, " + f"not {type(npimg)}") if npimg.shape[2] == 1: expected_mode = None @@ -273,15 +273,13 @@ def to_pil_image(pic, mode=None): elif npimg.dtype == np.float32: expected_mode = "F" if mode is not None and mode != expected_mode: - raise ValueError( - "Incorrect mode ({}) supplied for input type {}. Should be {}".format(mode, np.dtype, expected_mode) - ) + raise ValueError(f"Incorrect mode ({mode}) supplied for input type {np.dtype}. Should be {expected_mode}") mode = expected_mode elif npimg.shape[2] == 2: permitted_2_channel_modes = ["LA"] if mode is not None and mode not in permitted_2_channel_modes: - raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes)) + raise ValueError(f"Only modes {permitted_2_channel_modes} are supported for 2D inputs") if mode is None and npimg.dtype == np.uint8: mode = "LA" @@ -289,19 +287,19 @@ def to_pil_image(pic, mode=None): elif npimg.shape[2] == 4: permitted_4_channel_modes = ["RGBA", "CMYK", "RGBX"] if mode is not None and mode not in permitted_4_channel_modes: - raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes)) + raise ValueError(f"Only modes {permitted_4_channel_modes} are supported for 4D inputs") if mode is None and npimg.dtype == np.uint8: mode = "RGBA" else: permitted_3_channel_modes = ["RGB", "YCbCr", "HSV"] if mode is not None and mode not in permitted_3_channel_modes: - raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes)) + raise ValueError(f"Only modes {permitted_3_channel_modes} are supported for 3D inputs") if mode is None and npimg.dtype == np.uint8: mode = "RGB" if mode is None: - raise TypeError("Input type {} is not supported".format(npimg.dtype)) + raise TypeError(f"Input type {npimg.dtype} is not supported") return Image.fromarray(npimg, mode=mode) @@ -325,10 +323,10 @@ def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool Tensor: Normalized Tensor image. """ if not isinstance(tensor, torch.Tensor): - raise TypeError("Input tensor should be a torch tensor. Got {}.".format(type(tensor))) + raise TypeError(f"Input tensor should be a torch tensor. Got {type(tensor)}.") if not tensor.is_floating_point(): - raise TypeError("Input tensor should be a float tensor. Got {}.".format(tensor.dtype)) + raise TypeError(f"Input tensor should be a float tensor. 
Got {tensor.dtype}.") if tensor.ndim < 3: raise ValueError( @@ -343,7 +341,7 @@ def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device) std = torch.as_tensor(std, dtype=dtype, device=tensor.device) if (std == 0).any(): - raise ValueError("std evaluated to zero after conversion to {}, leading to division by zero.".format(dtype)) + raise ValueError(f"std evaluated to zero after conversion to {dtype}, leading to division by zero.") if mean.ndim == 1: mean = mean.view(-1, 1, 1) if std.ndim == 1: @@ -923,7 +921,7 @@ def _get_inverse_affine_matrix( # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1 rot = math.radians(angle) - sx, sy = [math.radians(s) for s in shear] + sx, sy = (math.radians(s) for s in shear) cx, cy = center tx, ty = translate @@ -1121,7 +1119,7 @@ def affine( shear = [shear[0], shear[0]] if len(shear) != 2: - raise ValueError("Shear should be a sequence containing two values. Got {}".format(shear)) + raise ValueError(f"Shear should be a sequence containing two values. Got {shear}") img_size = get_image_size(img) if not isinstance(img, torch.Tensor): @@ -1201,7 +1199,7 @@ def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool Tensor Image: Erased image. """ if not isinstance(img, torch.Tensor): - raise TypeError("img should be Tensor Image. Got {}".format(type(img))) + raise TypeError(f"img should be Tensor Image. Got {type(img)}") if not inplace: img = img.clone() @@ -1237,34 +1235,34 @@ def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[floa PIL Image or Tensor: Gaussian Blurred version of the image. """ if not isinstance(kernel_size, (int, list, tuple)): - raise TypeError("kernel_size should be int or a sequence of integers. Got {}".format(type(kernel_size))) + raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}") if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] if len(kernel_size) != 2: - raise ValueError("If kernel_size is a sequence its length should be 2. Got {}".format(len(kernel_size))) + raise ValueError(f"If kernel_size is a sequence its length should be 2. Got {len(kernel_size)}") for ksize in kernel_size: if ksize % 2 == 0 or ksize < 0: - raise ValueError("kernel_size should have odd and positive integers. Got {}".format(kernel_size)) + raise ValueError(f"kernel_size should have odd and positive integers. Got {kernel_size}") if sigma is None: sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size] if sigma is not None and not isinstance(sigma, (int, float, list, tuple)): - raise TypeError("sigma should be either float or sequence of floats. Got {}".format(type(sigma))) + raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}") if isinstance(sigma, (int, float)): sigma = [float(sigma), float(sigma)] if isinstance(sigma, (list, tuple)) and len(sigma) == 1: sigma = [sigma[0], sigma[0]] if len(sigma) != 2: - raise ValueError("If sigma is a sequence, its length should be 2. Got {}".format(len(sigma))) + raise ValueError(f"If sigma is a sequence, its length should be 2. Got {len(sigma)}") for s in sigma: if s <= 0.0: - raise ValueError("sigma should have positive values. Got {}".format(sigma)) + raise ValueError(f"sigma should have positive values. Got {sigma}") t_img = img if not isinstance(img, torch.Tensor): if not F_pil._is_pil_image(img): - raise TypeError("img should be PIL Image or Tensor. 
Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}") t_img = to_tensor(img) @@ -1307,7 +1305,7 @@ def posterize(img: Tensor, bits: int) -> Tensor: PIL Image or Tensor: Posterized image. """ if not (0 <= bits <= 8): - raise ValueError("The number if bits should be between 0 and 8. Got {}".format(bits)) + raise ValueError(f"The number if bits should be between 0 and 8. Got {bits}") if not isinstance(img, torch.Tensor): return F_pil.posterize(img, bits) diff --git a/torchvision/transforms/functional_pil.py b/torchvision/transforms/functional_pil.py index eb2ab31a4a9..9a80706dc30 100644 --- a/torchvision/transforms/functional_pil.py +++ b/torchvision/transforms/functional_pil.py @@ -23,20 +23,20 @@ def _is_pil_image(img: Any) -> bool: def get_image_size(img: Any) -> List[int]: if _is_pil_image(img): return list(img.size) - raise TypeError("Unexpected type {}".format(type(img))) + raise TypeError(f"Unexpected type {type(img)}") @torch.jit.unused def get_image_num_channels(img: Any) -> int: if _is_pil_image(img): return 1 if img.mode == "L" else 3 - raise TypeError("Unexpected type {}".format(type(img))) + raise TypeError(f"Unexpected type {type(img)}") @torch.jit.unused def hflip(img: Image.Image) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") return img.transpose(Image.FLIP_LEFT_RIGHT) @@ -44,7 +44,7 @@ def hflip(img: Image.Image) -> Image.Image: @torch.jit.unused def vflip(img: Image.Image) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") return img.transpose(Image.FLIP_TOP_BOTTOM) @@ -52,7 +52,7 @@ def vflip(img: Image.Image) -> Image.Image: @torch.jit.unused def adjust_brightness(img: Image.Image, brightness_factor: float) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") enhancer = ImageEnhance.Brightness(img) img = enhancer.enhance(brightness_factor) @@ -62,7 +62,7 @@ def adjust_brightness(img: Image.Image, brightness_factor: float) -> Image.Image @torch.jit.unused def adjust_contrast(img: Image.Image, contrast_factor: float) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") enhancer = ImageEnhance.Contrast(img) img = enhancer.enhance(contrast_factor) @@ -72,7 +72,7 @@ def adjust_contrast(img: Image.Image, contrast_factor: float) -> Image.Image: @torch.jit.unused def adjust_saturation(img: Image.Image, saturation_factor: float) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") enhancer = ImageEnhance.Color(img) img = enhancer.enhance(saturation_factor) @@ -82,10 +82,10 @@ def adjust_saturation(img: Image.Image, saturation_factor: float) -> Image.Image @torch.jit.unused def adjust_hue(img: Image.Image, hue_factor: float) -> Image.Image: if not (-0.5 <= hue_factor <= 0.5): - raise ValueError("hue_factor ({}) is not in [-0.5, 0.5].".format(hue_factor)) + raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].") if not _is_pil_image(img): - raise TypeError("img should be PIL Image. 
Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") input_mode = img.mode if input_mode in {"L", "1", "I", "F"}: @@ -111,7 +111,7 @@ def adjust_gamma( ) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") if gamma < 0: raise ValueError("Gamma should be a non-negative real number") @@ -134,7 +134,7 @@ def pad( ) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") if not isinstance(padding, (numbers.Number, tuple, list)): raise TypeError("Got inappropriate padding arg") @@ -148,7 +148,7 @@ def pad( if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]: raise ValueError( - "Padding must be an int or a 1, 2, or 4 element tuple, not a " + "{} element tuple".format(len(padding)) + "Padding must be an int or a 1, 2, or 4 element tuple, not a " + f"{len(padding)} element tuple" ) if isinstance(padding, tuple) and len(padding) == 1: @@ -217,7 +217,7 @@ def crop( ) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") return img.crop((left, top, left + width, top + height)) @@ -231,9 +231,9 @@ def resize( ) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") if not (isinstance(size, int) or (isinstance(size, Sequence) and len(size) in (1, 2))): - raise TypeError("Got inappropriate size arg: {}".format(size)) + raise TypeError(f"Got inappropriate size arg: {size}") if isinstance(size, Sequence) and len(size) == 1: size = size[0] @@ -298,7 +298,7 @@ def affine( ) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") output_size = img.size opts = _parse_fill(fill, img) @@ -316,7 +316,7 @@ def rotate( ) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") opts = _parse_fill(fill, img) return img.rotate(angle, interpolation, expand, center, **opts) @@ -331,7 +331,7 @@ def perspective( ) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") opts = _parse_fill(fill, img) @@ -341,7 +341,7 @@ def perspective( @torch.jit.unused def to_grayscale(img: Image.Image, num_output_channels: int) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") if num_output_channels == 1: img = img.convert("L") @@ -359,28 +359,28 @@ def to_grayscale(img: Image.Image, num_output_channels: int) -> Image.Image: @torch.jit.unused def invert(img: Image.Image) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") return ImageOps.invert(img) @torch.jit.unused def posterize(img: Image.Image, bits: int) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. 
Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") return ImageOps.posterize(img, bits) @torch.jit.unused def solarize(img: Image.Image, threshold: int) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") return ImageOps.solarize(img, threshold) @torch.jit.unused def adjust_sharpness(img: Image.Image, sharpness_factor: float) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") enhancer = ImageEnhance.Sharpness(img) img = enhancer.enhance(sharpness_factor) @@ -390,12 +390,12 @@ def adjust_sharpness(img: Image.Image, sharpness_factor: float) -> Image.Image: @torch.jit.unused def autocontrast(img: Image.Image) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") return ImageOps.autocontrast(img) @torch.jit.unused def equalize(img: Image.Image) -> Image.Image: if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) + raise TypeError(f"img should be PIL Image. Got {type(img)}") return ImageOps.equalize(img) diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py index d0fd78346b6..5c0fe450a32 100644 --- a/torchvision/transforms/functional_tensor.py +++ b/torchvision/transforms/functional_tensor.py @@ -28,7 +28,7 @@ def get_image_num_channels(img: Tensor) -> int: elif img.ndim > 2: return img.shape[-3] - raise TypeError("Input ndim should be 2 or more. Got {}".format(img.ndim)) + raise TypeError(f"Input ndim should be 2 or more. 
Got {img.ndim}") def _max_value(dtype: torch.dtype) -> float: @@ -52,7 +52,7 @@ def _max_value(dtype: torch.dtype) -> float: def _assert_channels(img: Tensor, permitted: List[int]) -> None: c = get_image_num_channels(img) if c not in permitted: - raise TypeError("Input image tensor permitted channel values are {}, but found {}".format(permitted, c)) + raise TypeError(f"Input image tensor permitted channel values are {permitted}, but found {c}") def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor: @@ -134,7 +134,7 @@ def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor: def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor: if img.ndim < 3: - raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim)) + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") _assert_channels(img, [3]) if num_output_channels not in (1, 3): @@ -154,7 +154,7 @@ def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor: def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor: if brightness_factor < 0: - raise ValueError("brightness_factor ({}) is not non-negative.".format(brightness_factor)) + raise ValueError(f"brightness_factor ({brightness_factor}) is not non-negative.") _assert_image_tensor(img) @@ -165,7 +165,7 @@ def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor: def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor: if contrast_factor < 0: - raise ValueError("contrast_factor ({}) is not non-negative.".format(contrast_factor)) + raise ValueError(f"contrast_factor ({contrast_factor}) is not non-negative.") _assert_image_tensor(img) @@ -182,7 +182,7 @@ def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor: def adjust_hue(img: Tensor, hue_factor: float) -> Tensor: if not (-0.5 <= hue_factor <= 0.5): - raise ValueError("hue_factor ({}) is not in [-0.5, 0.5].".format(hue_factor)) + raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].") if not (isinstance(img, torch.Tensor)): raise TypeError("Input img should be Tensor image") @@ -211,7 +211,7 @@ def adjust_hue(img: Tensor, hue_factor: float) -> Tensor: def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor: if saturation_factor < 0: - raise ValueError("saturation_factor ({}) is not non-negative.".format(saturation_factor)) + raise ValueError(f"saturation_factor ({saturation_factor}) is not non-negative.") _assert_image_tensor(img) @@ -382,7 +382,7 @@ def _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor: # crop if needed if padding[0] < 0 or padding[1] < 0 or padding[2] < 0 or padding[3] < 0: - crop_left, crop_right, crop_top, crop_bottom = [-min(x, 0) for x in padding] + crop_left, crop_right, crop_top, crop_bottom = (-min(x, 0) for x in padding) img = img[..., crop_top : img.shape[-2] - crop_bottom, crop_left : img.shape[-1] - crop_right] padding = [max(x, 0) for x in padding] @@ -422,7 +422,7 @@ def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "con if isinstance(padding, list) and len(padding) not in [1, 2, 4]: raise ValueError( - "Padding must be an int or a 1, 2, or 4 element tuple, not a " + "{} element tuple".format(len(padding)) + "Padding must be an int or a 1, 2, or 4 element tuple, not a " + f"{len(padding)} element tuple" ) if padding_mode not in ["constant", "edge", "reflect", "symmetric"]: @@ -597,7 +597,7 @@ def 
_assert_grid_transform_inputs( raise ValueError(msg.format(len(fill), num_channels)) if interpolation not in supported_interpolation_modes: - raise ValueError("Interpolation mode '{}' is unsupported with Tensor input".format(interpolation)) + raise ValueError(f"Interpolation mode '{interpolation}' is unsupported with Tensor input") def _cast_squeeze_in(img: Tensor, req_dtypes: List[torch.dtype]) -> Tuple[Tensor, bool, bool, torch.dtype]: @@ -823,7 +823,7 @@ def _get_gaussian_kernel2d( def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Tensor: if not (isinstance(img, torch.Tensor)): - raise TypeError("img should be Tensor. Got {}".format(type(img))) + raise TypeError(f"img should be Tensor. Got {type(img)}") _assert_image_tensor(img) @@ -852,7 +852,7 @@ def invert(img: Tensor) -> Tensor: _assert_image_tensor(img) if img.ndim < 3: - raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim)) + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") _assert_channels(img, [1, 3]) @@ -865,9 +865,9 @@ def posterize(img: Tensor, bits: int) -> Tensor: _assert_image_tensor(img) if img.ndim < 3: - raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim)) + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") if img.dtype != torch.uint8: - raise TypeError("Only torch.uint8 image tensors are supported, but found {}".format(img.dtype)) + raise TypeError(f"Only torch.uint8 image tensors are supported, but found {img.dtype}") _assert_channels(img, [1, 3]) mask = -int(2 ** (8 - bits)) # JIT-friendly for: ~(2 ** (8 - bits) - 1) @@ -879,7 +879,7 @@ def solarize(img: Tensor, threshold: float) -> Tensor: _assert_image_tensor(img) if img.ndim < 3: - raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim)) + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") _assert_channels(img, [1, 3]) @@ -912,7 +912,7 @@ def _blurred_degenerate_image(img: Tensor) -> Tensor: def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor: if sharpness_factor < 0: - raise ValueError("sharpness_factor ({}) is not non-negative.".format(sharpness_factor)) + raise ValueError(f"sharpness_factor ({sharpness_factor}) is not non-negative.") _assert_image_tensor(img) @@ -929,7 +929,7 @@ def autocontrast(img: Tensor) -> Tensor: _assert_image_tensor(img) if img.ndim < 3: - raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim)) + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") _assert_channels(img, [1, 3]) @@ -976,9 +976,9 @@ def equalize(img: Tensor) -> Tensor: _assert_image_tensor(img) if not (3 <= img.ndim <= 4): - raise TypeError("Input image tensor should have 3 or 4 dimensions, but found {}".format(img.ndim)) + raise TypeError(f"Input image tensor should have 3 or 4 dimensions, but found {img.ndim}") if img.dtype != torch.uint8: - raise TypeError("Only torch.uint8 image tensors are supported, but found {}".format(img.dtype)) + raise TypeError(f"Only torch.uint8 image tensors are supported, but found {img.dtype}") _assert_channels(img, [1, 3]) diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index e402aa7f9a7..54ecca433e1 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ 
-98,7 +98,7 @@ def __repr__(self): format_string = self.__class__.__name__ + "(" for t in self.transforms: format_string += "\n" - format_string += " {0}".format(t) + format_string += f" {t}" format_string += "\n)" return format_string @@ -220,7 +220,7 @@ def __call__(self, pic): def __repr__(self): format_string = self.__class__.__name__ + "(" if self.mode is not None: - format_string += "mode={0}".format(self.mode) + format_string += f"mode={self.mode}" format_string += ")" return format_string @@ -260,7 +260,7 @@ def forward(self, tensor: Tensor) -> Tensor: return F.normalize(tensor, self.mean, self.std, self.inplace) def __repr__(self): - return self.__class__.__name__ + "(mean={0}, std={1})".format(self.mean, self.std) + return self.__class__.__name__ + f"(mean={self.mean}, std={self.std})" class Resize(torch.nn.Module): @@ -310,7 +310,7 @@ class Resize(torch.nn.Module): def __init__(self, size, interpolation=InterpolationMode.BILINEAR, max_size=None, antialias=None): super().__init__() if not isinstance(size, (int, Sequence)): - raise TypeError("Size should be int or sequence. Got {}".format(type(size))) + raise TypeError(f"Size should be int or sequence. Got {type(size)}") if isinstance(size, Sequence) and len(size) not in (1, 2): raise ValueError("If size is a sequence, it should have 1 or 2 values") self.size = size @@ -339,7 +339,7 @@ def forward(self, img): def __repr__(self): interpolate_str = self.interpolation.value - return self.__class__.__name__ + "(size={0}, interpolation={1}, max_size={2}, antialias={3})".format( + return self.__class__.__name__ + "(size={}, interpolation={}, max_size={}, antialias={})".format( self.size, interpolate_str, self.max_size, self.antialias ) @@ -353,7 +353,7 @@ def __init__(self, *args, **kwargs): warnings.warn( "The use of the transforms.Scale transform is deprecated, " + "please use transforms.Resize instead." 
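A side note on the hunk just below: besides the f-string conversions, pyupgrade also rewrites the legacy two-argument super(Scale, self).__init__(...) call into the zero-argument form. A minimal sketch of the equivalence; the Scale/Resize names are taken from the surrounding diff, but the toy classes here are only illustrative, not the real torchvision implementations:

    class Resize:
        def __init__(self, *args, **kwargs):
            self.args = args

    class Scale(Resize):
        def __init__(self, *args, **kwargs):
            # Python 3 infers the enclosing class and the instance, so the explicit
            # super(Scale, self) arguments are redundant; both spellings behave the same here.
            super().__init__(*args, **kwargs)

    print(Scale(256).args)  # prints: (256,)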
) - super(Scale, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) class CenterCrop(torch.nn.Module): @@ -383,7 +383,7 @@ def forward(self, img): return F.center_crop(img, self.size) def __repr__(self): - return self.__class__.__name__ + "(size={0})".format(self.size) + return self.__class__.__name__ + f"(size={self.size})" class Pad(torch.nn.Module): @@ -437,7 +437,7 @@ def __init__(self, padding, fill=0, padding_mode="constant"): if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]: raise ValueError( - "Padding must be an int or a 1, 2, or 4 element tuple, not a " + "{} element tuple".format(len(padding)) + "Padding must be an int or a 1, 2, or 4 element tuple, not a " + f"{len(padding)} element tuple" ) self.padding = padding @@ -455,7 +455,7 @@ def forward(self, img): return F.pad(img, self.padding, self.fill, self.padding_mode) def __repr__(self): - return self.__class__.__name__ + "(padding={0}, fill={1}, padding_mode={2})".format( + return self.__class__.__name__ + "(padding={}, fill={}, padding_mode={})".format( self.padding, self.fill, self.padding_mode ) @@ -469,7 +469,7 @@ class Lambda: def __init__(self, lambd): if not callable(lambd): - raise TypeError("Argument lambd should be callable, got {}".format(repr(type(lambd).__name__))) + raise TypeError(f"Argument lambd should be callable, got {repr(type(lambd).__name__)}") self.lambd = lambd def __call__(self, img): @@ -498,7 +498,7 @@ def __repr__(self): format_string = self.__class__.__name__ + "(" for t in self.transforms: format_string += "\n" - format_string += " {0}".format(t) + format_string += f" {t}" format_string += "\n)" return format_string @@ -537,10 +537,10 @@ def forward(self, img): def __repr__(self): format_string = self.__class__.__name__ + "(" - format_string += "\n p={}".format(self.p) + format_string += f"\n p={self.p}" for t in self.transforms: format_string += "\n" - format_string += " {0}".format(t) + format_string += f" {t}" format_string += "\n)" return format_string @@ -571,7 +571,7 @@ def __call__(self, *args): def __repr__(self): format_string = super().__repr__() - format_string += "(p={0})".format(self.p) + format_string += f"(p={self.p})" return format_string @@ -634,7 +634,7 @@ def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int th, tw = output_size if h + 1 < th or w + 1 < tw: - raise ValueError("Required crop size {} is larger then input image size {}".format((th, tw), (h, w))) + raise ValueError(f"Required crop size {(th, tw)} is larger then input image size {(h, w)}") if w == tw and h == th: return 0, 0, h, w @@ -679,7 +679,7 @@ def forward(self, img): return F.crop(img, i, j, h, w) def __repr__(self): - return self.__class__.__name__ + "(size={0}, padding={1})".format(self.size, self.padding) + return self.__class__.__name__ + f"(size={self.size}, padding={self.padding})" class RandomHorizontalFlip(torch.nn.Module): @@ -709,7 +709,7 @@ def forward(self, img): return img def __repr__(self): - return self.__class__.__name__ + "(p={})".format(self.p) + return self.__class__.__name__ + f"(p={self.p})" class RandomVerticalFlip(torch.nn.Module): @@ -739,7 +739,7 @@ def forward(self, img): return img def __repr__(self): - return self.__class__.__name__ + "(p={})".format(self.p) + return self.__class__.__name__ + f"(p={self.p})" class RandomPerspective(torch.nn.Module): @@ -839,7 +839,7 @@ def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[L return startpoints, endpoints def __repr__(self): - return 
self.__class__.__name__ + "(p={})".format(self.p) + return self.__class__.__name__ + f"(p={self.p})" class RandomResizedCrop(torch.nn.Module): @@ -951,10 +951,10 @@ def forward(self, img): def __repr__(self): interpolate_str = self.interpolation.value - format_string = self.__class__.__name__ + "(size={0}".format(self.size) - format_string += ", scale={0}".format(tuple(round(s, 4) for s in self.scale)) - format_string += ", ratio={0}".format(tuple(round(r, 4) for r in self.ratio)) - format_string += ", interpolation={0})".format(interpolate_str) + format_string = self.__class__.__name__ + f"(size={self.size}" + format_string += f", scale={tuple(round(s, 4) for s in self.scale)}" + format_string += f", ratio={tuple(round(r, 4) for r in self.ratio)}" + format_string += f", interpolation={interpolate_str})" return format_string @@ -968,7 +968,7 @@ def __init__(self, *args, **kwargs): "The use of the transforms.RandomSizedCrop transform is deprecated, " + "please use transforms.RandomResizedCrop instead." ) - super(RandomSizedCrop, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) class FiveCrop(torch.nn.Module): @@ -1014,7 +1014,7 @@ def forward(self, img): return F.five_crop(img, self.size) def __repr__(self): - return self.__class__.__name__ + "(size={0})".format(self.size) + return self.__class__.__name__ + f"(size={self.size})" class TenCrop(torch.nn.Module): @@ -1063,7 +1063,7 @@ def forward(self, img): return F.ten_crop(img, self.size, self.vertical_flip) def __repr__(self): - return self.__class__.__name__ + "(size={0}, vertical_flip={1})".format(self.size, self.vertical_flip) + return self.__class__.__name__ + f"(size={self.size}, vertical_flip={self.vertical_flip})" class LinearTransformation(torch.nn.Module): @@ -1095,7 +1095,7 @@ def __init__(self, transformation_matrix, mean_vector): if mean_vector.size(0) != transformation_matrix.size(0): raise ValueError( - "mean_vector should have the same length {}".format(mean_vector.size(0)) + f"mean_vector should have the same length {mean_vector.size(0)}" + " as any one of the dimensions of the transformation_matrix [{}]".format( tuple(transformation_matrix.size()) ) @@ -1124,8 +1124,8 @@ def forward(self, tensor: Tensor) -> Tensor: if n != self.transformation_matrix.shape[0]: raise ValueError( "Input tensor and transformation matrix have incompatible shape." 
- + "[{} x {} x {}] != ".format(shape[-3], shape[-2], shape[-1]) - + "{}".format(self.transformation_matrix.shape[0]) + + f"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != " + + f"{self.transformation_matrix.shape[0]}" ) if tensor.device.type != self.mean_vector.device.type: @@ -1178,15 +1178,15 @@ def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): def _check_input(self, value, name, center=1, bound=(0, float("inf")), clip_first_on_zero=True): if isinstance(value, numbers.Number): if value < 0: - raise ValueError("If {} is a single number, it must be non negative.".format(name)) + raise ValueError(f"If {name} is a single number, it must be non negative.") value = [center - float(value), center + float(value)] if clip_first_on_zero: value[0] = max(value[0], 0.0) elif isinstance(value, (tuple, list)) and len(value) == 2: if not bound[0] <= value[0] <= value[1] <= bound[1]: - raise ValueError("{} values should be between {}".format(name, bound)) + raise ValueError(f"{name} values should be between {bound}") else: - raise TypeError("{} should be a single number or a list/tuple with length 2.".format(name)) + raise TypeError(f"{name} should be a single number or a list/tuple with length 2.") # if value is 0 or (1., 1.) for brightness/contrast/saturation # or (0., 0.) for hue, do nothing @@ -1252,10 +1252,10 @@ def forward(self, img): def __repr__(self): format_string = self.__class__.__name__ + "(" - format_string += "brightness={0}".format(self.brightness) - format_string += ", contrast={0}".format(self.contrast) - format_string += ", saturation={0}".format(self.saturation) - format_string += ", hue={0})".format(self.hue) + format_string += f"brightness={self.brightness}" + format_string += f", contrast={self.contrast}" + format_string += f", saturation={self.saturation}" + format_string += f", hue={self.hue})" return format_string @@ -1352,13 +1352,13 @@ def forward(self, img): def __repr__(self): interpolate_str = self.interpolation.value - format_string = self.__class__.__name__ + "(degrees={0}".format(self.degrees) - format_string += ", interpolation={0}".format(interpolate_str) - format_string += ", expand={0}".format(self.expand) + format_string = self.__class__.__name__ + f"(degrees={self.degrees}" + format_string += f", interpolation={interpolate_str}" + format_string += f", expand={self.expand}" if self.center is not None: - format_string += ", center={0}".format(self.center) + format_string += f", center={self.center}" if self.fill is not None: - format_string += ", fill={0}".format(self.fill) + format_string += f", fill={self.fill}" format_string += ")" return format_string @@ -1568,7 +1568,7 @@ def forward(self, img): return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels) def __repr__(self): - return self.__class__.__name__ + "(num_output_channels={0})".format(self.num_output_channels) + return self.__class__.__name__ + f"(num_output_channels={self.num_output_channels})" class RandomGrayscale(torch.nn.Module): @@ -1605,7 +1605,7 @@ def forward(self, img): return img def __repr__(self): - return self.__class__.__name__ + "(p={0})".format(self.p) + return self.__class__.__name__ + f"(p={self.p})" class RandomErasing(torch.nn.Module): @@ -1734,11 +1734,11 @@ def forward(self, img): return img def __repr__(self): - s = "(p={}, ".format(self.p) - s += "scale={}, ".format(self.scale) - s += "ratio={}, ".format(self.ratio) - s += "value={}, ".format(self.value) - s += "inplace={})".format(self.inplace) + s = f"(p={self.p}, " + s += f"scale={self.scale}, 
" + s += f"ratio={self.ratio}, " + s += f"value={self.value}, " + s += f"inplace={self.inplace})" return self.__class__.__name__ + s @@ -1803,8 +1803,8 @@ def forward(self, img: Tensor) -> Tensor: return F.gaussian_blur(img, self.kernel_size, [sigma, sigma]) def __repr__(self): - s = "(kernel_size={}, ".format(self.kernel_size) - s += "sigma={})".format(self.sigma) + s = f"(kernel_size={self.kernel_size}, " + s += f"sigma={self.sigma})" return self.__class__.__name__ + s @@ -1824,15 +1824,15 @@ def _setup_size(size, error_msg): def _check_sequence_input(x, name, req_sizes): msg = req_sizes[0] if len(req_sizes) < 2 else " or ".join([str(s) for s in req_sizes]) if not isinstance(x, Sequence): - raise TypeError("{} should be a sequence of length {}.".format(name, msg)) + raise TypeError(f"{name} should be a sequence of length {msg}.") if len(x) not in req_sizes: - raise ValueError("{} should be sequence of length {}.".format(name, msg)) + raise ValueError(f"{name} should be sequence of length {msg}.") def _setup_angle(x, name, req_sizes=(2,)): if isinstance(x, numbers.Number): if x < 0: - raise ValueError("If {} is a single number, it must be positive.".format(name)) + raise ValueError(f"If {name} is a single number, it must be positive.") x = [-x, x] else: _check_sequence_input(x, name, req_sizes) @@ -1867,7 +1867,7 @@ def forward(self, img): return img def __repr__(self): - return self.__class__.__name__ + "(p={})".format(self.p) + return self.__class__.__name__ + f"(p={self.p})" class RandomPosterize(torch.nn.Module): @@ -1899,7 +1899,7 @@ def forward(self, img): return img def __repr__(self): - return self.__class__.__name__ + "(bits={},p={})".format(self.bits, self.p) + return self.__class__.__name__ + f"(bits={self.bits},p={self.p})" class RandomSolarize(torch.nn.Module): @@ -1931,7 +1931,7 @@ def forward(self, img): return img def __repr__(self): - return self.__class__.__name__ + "(threshold={},p={})".format(self.threshold, self.p) + return self.__class__.__name__ + f"(threshold={self.threshold},p={self.p})" class RandomAdjustSharpness(torch.nn.Module): @@ -1963,7 +1963,7 @@ def forward(self, img): return img def __repr__(self): - return self.__class__.__name__ + "(sharpness_factor={},p={})".format(self.sharpness_factor, self.p) + return self.__class__.__name__ + f"(sharpness_factor={self.sharpness_factor},p={self.p})" class RandomAutocontrast(torch.nn.Module): @@ -1993,7 +1993,7 @@ def forward(self, img): return img def __repr__(self): - return self.__class__.__name__ + "(p={})".format(self.p) + return self.__class__.__name__ + f"(p={self.p})" class RandomEqualize(torch.nn.Module): @@ -2023,4 +2023,4 @@ def forward(self, img): return img def __repr__(self): - return self.__class__.__name__ + "(p={})".format(self.p) + return self.__class__.__name__ + f"(p={self.p})" diff --git a/torchvision/utils.py b/torchvision/utils.py index a71e0f234b4..2b9b5da3b29 100644 --- a/torchvision/utils.py +++ b/torchvision/utils.py @@ -1,7 +1,7 @@ import math import pathlib import warnings -from typing import Union, Optional, List, Tuple, Text, BinaryIO +from typing import Union, Optional, List, Tuple, BinaryIO import numpy as np import torch @@ -114,7 +114,7 @@ def norm_range(t, value_range): @torch.no_grad() def save_image( tensor: Union[torch.Tensor, List[torch.Tensor]], - fp: Union[Text, pathlib.Path, BinaryIO], + fp: Union[str, pathlib.Path, BinaryIO], format: Optional[str] = None, **kwargs, ) -> None: From 7d04bcfe4504fa3210ebb46973f7aa746a2ae625 Mon Sep 17 00:00:00 2001 From: Jirka Date: Mon, 11 
Oct 2021 14:05:48 +0200 Subject: [PATCH 05/17] fix " " --- .circleci/unittest/linux/scripts/run-clang-format.py | 4 ++-- packaging/wheel/relocate.py | 4 ++-- references/classification/train.py | 2 +- references/classification/train_quantization.py | 2 +- references/detection/group_by_aspect_ratio.py | 2 +- references/detection/train.py | 2 +- references/segmentation/utils.py | 2 +- references/video_classification/train.py | 4 ++-- setup.py | 2 +- test/test_image.py | 4 ++-- torchvision/__init__.py | 2 +- torchvision/datasets/cityscapes.py | 2 +- torchvision/datasets/imagenet.py | 2 +- torchvision/datasets/lsun.py | 4 ++-- torchvision/datasets/samplers/clip_sampler.py | 6 +++--- torchvision/datasets/sbd.py | 2 +- torchvision/datasets/stl10.py | 4 ++-- torchvision/datasets/utils.py | 4 ++-- torchvision/datasets/video_utils.py | 2 +- torchvision/datasets/vision.py | 2 +- torchvision/io/image.py | 2 +- torchvision/io/video.py | 2 +- torchvision/models/detection/_utils.py | 4 ++-- torchvision/models/detection/faster_rcnn.py | 2 +- torchvision/models/detection/generalized_rcnn.py | 4 ++-- torchvision/models/detection/retinanet.py | 4 ++-- torchvision/models/detection/ssd.py | 4 ++-- torchvision/models/detection/transform.py | 2 +- torchvision/models/feature_extraction.py | 8 ++++---- torchvision/models/googlenet.py | 2 +- torchvision/models/quantization/googlenet.py | 2 +- torchvision/models/squeezenet.py | 2 +- torchvision/ops/_register_onnx_ops.py | 2 +- torchvision/transforms/functional_pil.py | 2 +- torchvision/transforms/functional_tensor.py | 8 ++++---- 35 files changed, 54 insertions(+), 54 deletions(-) diff --git a/.circleci/unittest/linux/scripts/run-clang-format.py b/.circleci/unittest/linux/scripts/run-clang-format.py index 6c336526382..5c61b2519e0 100755 --- a/.circleci/unittest/linux/scripts/run-clang-format.py +++ b/.circleci/unittest/linux/scripts/run-clang-format.py @@ -226,7 +226,7 @@ def main(): metavar="N", type=int, default=0, - help="run N clang-format jobs in parallel" " (default number of cpus + 1)", + help="run N clang-format jobs in parallel (default number of cpus + 1)", ) parser.add_argument( "--color", default="auto", choices=["auto", "always", "never"], help="show colored diff (default: auto)" @@ -237,7 +237,7 @@ def main(): metavar="PATTERN", action="append", default=[], - help="exclude paths matching the given glob-like pattern(s)" " from recursive search", + help="exclude paths matching the given glob-like pattern(s) from recursive search", ) args = parser.parse_args() diff --git a/packaging/wheel/relocate.py b/packaging/wheel/relocate.py index 665dab3bf40..027dddb0dcd 100644 --- a/packaging/wheel/relocate.py +++ b/packaging/wheel/relocate.py @@ -316,7 +316,7 @@ def patch_linux(): patchelf = find_program("patchelf") if patchelf is None: raise FileNotFoundError( - "Patchelf was not found in the system, please" " make sure that is available on the PATH." + "Patchelf was not found in the system, please make sure that is available on the PATH." ) # Find wheel @@ -354,7 +354,7 @@ def patch_win(): dumpbin = find_program("dumpbin") if dumpbin is None: raise FileNotFoundError( - "Dumpbin was not found in the system, please" " make sure that is available on the PATH." + "Dumpbin was not found in the system, please make sure that is available on the PATH." 
) # Find wheel diff --git a/references/classification/train.py b/references/classification/train.py index 565878a42a7..2ec3f979237 100644 --- a/references/classification/train.py +++ b/references/classification/train.py @@ -273,7 +273,7 @@ def main(args): ) else: raise RuntimeError( - f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant " "are supported." + f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant are supported." ) lr_scheduler = torch.optim.lr_scheduler.SequentialLR( optimizer, schedulers=[warmup_lr_scheduler, main_lr_scheduler], milestones=[args.lr_warmup_epochs] diff --git a/references/classification/train_quantization.py b/references/classification/train_quantization.py index 8b2b6dc85e6..fe9ab4ae269 100644 --- a/references/classification/train_quantization.py +++ b/references/classification/train_quantization.py @@ -20,7 +20,7 @@ def main(args): print(args) if args.post_training_quantize and args.distributed: - raise RuntimeError("Post training quantization example should not be performed " "on distributed mode") + raise RuntimeError("Post training quantization example should not be performed on distributed mode") # Set backend engine to ensure that quantized model runs on the correct kernels if args.backend not in torch.backends.quantized.supported_engines: diff --git a/references/detection/group_by_aspect_ratio.py b/references/detection/group_by_aspect_ratio.py index 17f17ee495e..728c28da3a8 100644 --- a/references/detection/group_by_aspect_ratio.py +++ b/references/detection/group_by_aspect_ratio.py @@ -37,7 +37,7 @@ class GroupedBatchSampler(BatchSampler): def __init__(self, sampler, group_ids, batch_size): if not isinstance(sampler, Sampler): raise ValueError( - "sampler should be an instance of " "torch.utils.data.Sampler, but got sampler={}".format(sampler) + "sampler should be an instance of torch.utils.data.Sampler, but got sampler={}".format(sampler) ) self.sampler = sampler self.group_ids = group_ids diff --git a/references/detection/train.py b/references/detection/train.py index f228a639208..64e7ebd87d1 100644 --- a/references/detection/train.py +++ b/references/detection/train.py @@ -65,7 +65,7 @@ def get_args_parser(add_help=True): "--lr", default=0.02, type=float, - help="initial learning rate, 0.02 is the default value for training " "on 8 gpus and 2 images_per_gpu", + help="initial learning rate, 0.02 is the default value for training on 8 gpus and 2 images_per_gpu", ) parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum") parser.add_argument( diff --git a/references/segmentation/utils.py b/references/segmentation/utils.py index 45e6f025393..57ab6ecd670 100644 --- a/references/segmentation/utils.py +++ b/references/segmentation/utils.py @@ -101,7 +101,7 @@ def reduce_from_all_processes(self): def __str__(self): acc_global, acc, iu = self.compute() - return ("global correct: {:.1f}\n" "average row correct: {}\n" "IoU: {}\n" "mean IoU: {:.1f}").format( + return ("global correct: {:.1f}\naverage row correct: {}\nIoU: {}\nmean IoU: {:.1f}").format( acc_global.item() * 100, [f"{i:.1f}" for i in (acc * 100).tolist()], [f"{i:.1f}" for i in (iu * 100).tolist()], diff --git a/references/video_classification/train.py b/references/video_classification/train.py index 432ff590b0e..f78125663fd 100644 --- a/references/video_classification/train.py +++ b/references/video_classification/train.py @@ -127,7 +127,7 @@ def main(args): dataset.transform = transform_train else: if 
args.distributed: - print("It is recommended to pre-compute the dataset cache " "on a single-gpu first, as it will be faster") + print("It is recommended to pre-compute the dataset cache on a single-gpu first, as it will be faster") dataset = torchvision.datasets.Kinetics400( traindir, frames_per_clip=args.clip_len, @@ -157,7 +157,7 @@ def main(args): dataset_test.transform = transform_test else: if args.distributed: - print("It is recommended to pre-compute the dataset cache " "on a single-gpu first, as it will be faster") + print("It is recommended to pre-compute the dataset cache on a single-gpu first, as it will be faster") dataset_test = torchvision.datasets.Kinetics400( valdir, frames_per_clip=args.clip_len, diff --git a/setup.py b/setup.py index 1ee1e61ac17..00da5bacdb1 100644 --- a/setup.py +++ b/setup.py @@ -279,7 +279,7 @@ def get_extensions(): image_include += [png_include] image_link_flags.append("png") else: - print("libpng installed version is less than 1.6.0, " "disabling PNG support") + print("libpng installed version is less than 1.6.0, disabling PNG support") png_found = False else: # Windows diff --git a/test/test_image.py b/test/test_image.py index c4ee7f2a4ae..f80e1d889fb 100644 --- a/test/test_image.py +++ b/test/test_image.py @@ -379,10 +379,10 @@ def test_encode_jpeg_errors(): with pytest.raises(RuntimeError, match="Input tensor dtype should be uint8"): encode_jpeg(torch.empty((3, 100, 100), dtype=torch.float32)) - with pytest.raises(ValueError, match="Image quality should be a positive number " "between 1 and 100"): + with pytest.raises(ValueError, match="Image quality should be a positive number between 1 and 100"): encode_jpeg(torch.empty((3, 100, 100), dtype=torch.uint8), quality=-1) - with pytest.raises(ValueError, match="Image quality should be a positive number " "between 1 and 100"): + with pytest.raises(ValueError, match="Image quality should be a positive number between 1 and 100"): encode_jpeg(torch.empty((3, 100, 100), dtype=torch.uint8), quality=101) with pytest.raises(RuntimeError, match="The number of channels should be 1 or 3, got: 5"): diff --git a/torchvision/__init__.py b/torchvision/__init__.py index 0940cf98b4a..32b522cbc42 100644 --- a/torchvision/__init__.py +++ b/torchvision/__init__.py @@ -74,7 +74,7 @@ def set_video_backend(backend): if backend not in ["pyav", "video_reader"]: raise ValueError("Invalid video backend '%s'. Options are 'pyav' and 'video_reader'" % backend) if backend == "video_reader" and not io._HAS_VIDEO_OPT: - message = "video_reader video backend is not available." " Please compile torchvision from source and try again" + message = "video_reader video backend is not available. Please compile torchvision from source and try again" warnings.warn(message) else: _video_backend = backend diff --git a/torchvision/datasets/cityscapes.py b/torchvision/datasets/cityscapes.py index d2c120a8fe8..6836797b6b3 100644 --- a/torchvision/datasets/cityscapes.py +++ b/torchvision/datasets/cityscapes.py @@ -125,7 +125,7 @@ def __init__( valid_modes = ("train", "test", "val") else: valid_modes = ("train", "train_extra", "val") - msg = "Unknown value '{}' for argument split if mode is '{}'. " "Valid values are {{{}}}." + msg = "Unknown value '{}' for argument split if mode is '{}'. Valid values are {{{}}}." 
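Most hunks in this commit, like the cityscapes.py change just above, merge pairs of adjacent string literals that earlier formatting had left split across two fragments. Adjacent literals are concatenated at compile time, so the two spellings produce the same string, but the split form makes it easy to drop or double a space at the seam, which is what this cleanup guards against. A small sketch, with illustrative message text rather than the exact torchvision strings:

    # Implicit concatenation joins neighbouring literals with no separator,
    # so the space has to live inside one of the fragments.
    split_msg = "Unknown value for argument split. " "Valid values are {train, val}."
    merged_msg = "Unknown value for argument split. Valid values are {train, val}."
    assert split_msg == merged_msg

    # A common bug the merged form avoids: forgetting the space at the seam.
    buggy = "Unknown value for argument split." "Valid values are {train, val}."
    assert "split.Valid" in buggy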
msg = msg.format(split, mode, iterable_to_str(valid_modes)) verify_str_arg(split, "split", valid_modes, msg) diff --git a/torchvision/datasets/imagenet.py b/torchvision/datasets/imagenet.py index 6cd8ee619a3..82bee905ff1 100644 --- a/torchvision/datasets/imagenet.py +++ b/torchvision/datasets/imagenet.py @@ -49,7 +49,7 @@ def __init__(self, root: str, split: str = "train", download: Optional[str] = No ) raise RuntimeError(msg) elif download is False: - msg = "The use of the download flag is deprecated, since the dataset " "is no longer publicly accessible." + msg = "The use of the download flag is deprecated, since the dataset is no longer publicly accessible." warnings.warn(msg, RuntimeWarning) root = self.root = os.path.expanduser(root) diff --git a/torchvision/datasets/lsun.py b/torchvision/datasets/lsun.py index 3079e530371..5303b1d4756 100644 --- a/torchvision/datasets/lsun.py +++ b/torchvision/datasets/lsun.py @@ -117,11 +117,11 @@ def _verify_classes(self, classes: Union[str, List[str]]) -> List[str]: classes = [c + "_" + classes for c in categories] except ValueError: if not isinstance(classes, Iterable): - msg = "Expected type str or Iterable for argument classes, " "but got type {}." + msg = "Expected type str or Iterable for argument classes, but got type {}." raise ValueError(msg.format(type(classes))) classes = list(classes) - msg_fmtstr_type = "Expected type str for elements in argument classes, " "but got type {}." + msg_fmtstr_type = "Expected type str for elements in argument classes, but got type {}." for c in classes: verify_str_arg(c, custom_msg=msg_fmtstr_type.format(type(c))) c_short = c.split("_") diff --git a/torchvision/datasets/samplers/clip_sampler.py b/torchvision/datasets/samplers/clip_sampler.py index 259621bf91f..e63a48389b9 100644 --- a/torchvision/datasets/samplers/clip_sampler.py +++ b/torchvision/datasets/samplers/clip_sampler.py @@ -54,7 +54,7 @@ def __init__( rank = dist.get_rank() assert ( len(dataset) % group_size == 0 - ), "dataset length must be a multiplier of group size" "dataset length: %d, group size: %d" % ( + ), "dataset length must be a multiplier of group size dataset length: %d, group size: %d" % ( len(dataset), group_size, ) @@ -117,7 +117,7 @@ class UniformClipSampler(Sampler): def __init__(self, video_clips: VideoClips, num_clips_per_video: int) -> None: if not isinstance(video_clips, VideoClips): - raise TypeError("Expected video_clips to be an instance of VideoClips, " "got {}".format(type(video_clips))) + raise TypeError("Expected video_clips to be an instance of VideoClips, got {}".format(type(video_clips))) self.video_clips = video_clips self.num_clips_per_video = num_clips_per_video @@ -151,7 +151,7 @@ class RandomClipSampler(Sampler): def __init__(self, video_clips: VideoClips, max_clips_per_video: int) -> None: if not isinstance(video_clips, VideoClips): - raise TypeError("Expected video_clips to be an instance of VideoClips, " "got {}".format(type(video_clips))) + raise TypeError("Expected video_clips to be an instance of VideoClips, got {}".format(type(video_clips))) self.video_clips = video_clips self.max_clips_per_video = max_clips_per_video diff --git a/torchvision/datasets/sbd.py b/torchvision/datasets/sbd.py index d1f2f3016a2..1ec22a551d0 100644 --- a/torchvision/datasets/sbd.py +++ b/torchvision/datasets/sbd.py @@ -63,7 +63,7 @@ def __init__( self._loadmat = loadmat except ImportError: - raise RuntimeError("Scipy is not found. 
This dataset needs to have scipy installed: " "pip install scipy") + raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: pip install scipy") super().__init__(root, transforms) self.image_set = verify_str_arg(image_set, "image_set", ("train", "val", "train_noval")) diff --git a/torchvision/datasets/stl10.py b/torchvision/datasets/stl10.py index 354e583c3d2..00032963ff5 100644 --- a/torchvision/datasets/stl10.py +++ b/torchvision/datasets/stl10.py @@ -60,7 +60,7 @@ def __init__( if download: self.download() elif not self._check_integrity(): - raise RuntimeError("Dataset not found or corrupted. " "You can use download=True to download it") + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") # now load the picked numpy arrays self.labels: Optional[np.ndarray] @@ -92,7 +92,7 @@ def _verify_folds(self, folds: Optional[int]) -> Optional[int]: elif isinstance(folds, int): if folds in range(10): return folds - msg = "Value for argument folds should be in the range [0, 10), " "but got {}." + msg = "Value for argument folds should be in the range [0, 10), but got {}." raise ValueError(msg.format(folds)) else: msg = "Expected type None or int for argument folds, but got type {}." diff --git a/torchvision/datasets/utils.py b/torchvision/datasets/utils.py index a12b04f1ad3..2fd89e4c5a2 100644 --- a/torchvision/datasets/utils.py +++ b/torchvision/datasets/utils.py @@ -141,7 +141,7 @@ def download_url( except (urllib.error.URLError, OSError) as e: # type: ignore[attr-defined] if url[:5] == "https": url = url.replace("https:", "http:") - print("Failed download. Trying https -> http instead." " Downloading " + url + " to " + fpath) + print("Failed download. Trying https -> http instead. Downloading " + url + " to " + fpath) _urlretrieve(url, fpath) else: raise e @@ -460,7 +460,7 @@ def verify_str_arg( if custom_msg is not None: msg = custom_msg else: - msg = "Unknown value '{value}' for argument {arg}. " "Valid values are {{{valid_values}}}." + msg = "Unknown value '{value}' for argument {arg}. Valid values are {{{valid_values}}}." 
msg = msg.format(value=value, arg=arg, valid_values=iterable_to_str(valid_values)) raise ValueError(msg) diff --git a/torchvision/datasets/video_utils.py b/torchvision/datasets/video_utils.py index 1917dedad65..573a53cccfc 100644 --- a/torchvision/datasets/video_utils.py +++ b/torchvision/datasets/video_utils.py @@ -293,7 +293,7 @@ def get_clip(self, idx): video_idx (int): index of the video in `video_paths` """ if idx >= self.num_clips(): - raise IndexError("Index {} out of range " "({} number of clips)".format(idx, self.num_clips())) + raise IndexError("Index {} out of range ({} number of clips)".format(idx, self.num_clips())) video_idx, clip_idx = self.get_clip_location(idx) video_path = self.video_paths[video_idx] clip_pts = self.clips[video_idx][clip_idx] diff --git a/torchvision/datasets/vision.py b/torchvision/datasets/vision.py index 296caa0226f..7ee228b1656 100644 --- a/torchvision/datasets/vision.py +++ b/torchvision/datasets/vision.py @@ -41,7 +41,7 @@ def __init__( has_transforms = transforms is not None has_separate_transform = transform is not None or target_transform is not None if has_transforms and has_separate_transform: - raise ValueError("Only transforms or transform/target_transform can " "be passed as argument") + raise ValueError("Only transforms or transform/target_transform can be passed as argument") # for backwards-compatibility self.transform = transform diff --git a/torchvision/io/image.py b/torchvision/io/image.py index 2ba1e9eddd9..45e6caeaded 100644 --- a/torchvision/io/image.py +++ b/torchvision/io/image.py @@ -161,7 +161,7 @@ def encode_jpeg(input: torch.Tensor, quality: int = 75) -> torch.Tensor: JPEG file. """ if quality < 1 or quality > 100: - raise ValueError("Image quality should be a positive number " "between 1 and 100") + raise ValueError("Image quality should be a positive number between 1 and 100") output = torch.ops.image.encode_jpeg(input, quality) return output diff --git a/torchvision/io/video.py b/torchvision/io/video.py index e5648459113..61eda488b70 100644 --- a/torchvision/io/video.py +++ b/torchvision/io/video.py @@ -272,7 +272,7 @@ def read_video( if end_pts < start_pts: raise ValueError( - "end_pts should be larger than start_pts, got " "start_pts={} and end_pts={}".format(start_pts, end_pts) + "end_pts should be larger than start_pts, got start_pts={} and end_pts={}".format(start_pts, end_pts) ) info = {} diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py index e5909bcaa8e..b870e6a2456 100644 --- a/torchvision/models/detection/_utils.py +++ b/torchvision/models/detection/_utils.py @@ -275,9 +275,9 @@ def __call__(self, match_quality_matrix: Tensor) -> Tensor: if match_quality_matrix.numel() == 0: # empty targets or proposals not supported during training if match_quality_matrix.shape[0] == 0: - raise ValueError("No ground-truth boxes available for one of the images " "during training") + raise ValueError("No ground-truth boxes available for one of the images during training") else: - raise ValueError("No proposal boxes available for one of the images " "during training") + raise ValueError("No proposal boxes available for one of the images during training") # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction diff --git a/torchvision/models/detection/faster_rcnn.py b/torchvision/models/detection/faster_rcnn.py index c6499420930..e194d5c2c10 100644 --- a/torchvision/models/detection/faster_rcnn.py +++ 
b/torchvision/models/detection/faster_rcnn.py @@ -192,7 +192,7 @@ def __init__( raise ValueError("num_classes should be None when box_predictor is specified") else: if box_predictor is None: - raise ValueError("num_classes should not be None when box_predictor " "is not specified") + raise ValueError("num_classes should not be None when box_predictor is not specified") out_channels = backbone.out_channels diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index 3a65e21ed5b..e080d2107f3 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -63,10 +63,10 @@ def forward(self, images, targets=None): if isinstance(boxes, torch.Tensor): if len(boxes.shape) != 2 or boxes.shape[-1] != 4: raise ValueError( - "Expected target boxes to be a tensor" "of shape [N, 4], got {:}.".format(boxes.shape) + "Expected target boxes to be a tensor of shape [N, 4], got {:}.".format(boxes.shape) ) else: - raise ValueError("Expected target boxes to be of type " "Tensor, got {:}.".format(type(boxes))) + raise ValueError("Expected target boxes to be of type Tensor, got {:}.".format(type(boxes))) original_image_sizes: List[Tuple[int, int]] = [] for img in images: diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index eb05144cb0c..a3695247b77 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -490,10 +490,10 @@ def forward(self, images, targets=None): if isinstance(boxes, torch.Tensor): if len(boxes.shape) != 2 or boxes.shape[-1] != 4: raise ValueError( - "Expected target boxes to be a tensor" "of shape [N, 4], got {:}.".format(boxes.shape) + "Expected target boxes to be a tensor of shape [N, 4], got {:}.".format(boxes.shape) ) else: - raise ValueError("Expected target boxes to be of type " "Tensor, got {:}.".format(type(boxes))) + raise ValueError("Expected target boxes to be of type Tensor, got {:}.".format(type(boxes))) # get the original image sizes original_image_sizes: List[Tuple[int, int]] = [] diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 543b5ca2562..0c92f82fd98 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -311,10 +311,10 @@ def forward( if isinstance(boxes, torch.Tensor): if len(boxes.shape) != 2 or boxes.shape[-1] != 4: raise ValueError( - "Expected target boxes to be a tensor" "of shape [N, 4], got {:}.".format(boxes.shape) + "Expected target boxes to be a tensor of shape [N, 4], got {:}.".format(boxes.shape) ) else: - raise ValueError("Expected target boxes to be of type " "Tensor, got {:}.".format(type(boxes))) + raise ValueError("Expected target boxes to be of type Tensor, got {:}.".format(type(boxes))) # get the original image sizes original_image_sizes: List[Tuple[int, int]] = [] diff --git a/torchvision/models/detection/transform.py b/torchvision/models/detection/transform.py index f14e86f3bbe..59790c32aa0 100644 --- a/torchvision/models/detection/transform.py +++ b/torchvision/models/detection/transform.py @@ -124,7 +124,7 @@ def forward( if image.dim() != 3: raise ValueError( - "images is expected to be a list of 3d tensors " "of shape [C, H, W], got {}".format(image.shape) + "images is expected to be a list of 3d tensors of shape [C, H, W], got {}".format(image.shape) ) image = self.normalize(image) image, target_index = self.resize(image, target_index) diff --git 
a/torchvision/models/feature_extraction.py b/torchvision/models/feature_extraction.py index 114fbf855bc..b472db1692c 100644 --- a/torchvision/models/feature_extraction.py +++ b/torchvision/models/feature_extraction.py @@ -168,7 +168,7 @@ def _warn_graph_differences(train_tracer: NodePathTracer, eval_tracer: NodePathT "are a subsequence of those obtained in eval mode. " ) else: - msg = "The nodes obtained by tracing the model in train mode " "are different to those obtained in eval mode. " + msg = "The nodes obtained by tracing the model in train mode are different to those obtained in eval mode. " warnings.warn(msg + suggestion_msg) @@ -400,15 +400,15 @@ def create_feature_extractor( is_training = model.training assert any(arg is not None for arg in [return_nodes, train_return_nodes, eval_return_nodes]), ( - "Either `return_nodes` or `train_return_nodes` and " "`eval_return_nodes` together, should be specified" + "Either `return_nodes` or `train_return_nodes` and `eval_return_nodes` together, should be specified" ) assert not ((train_return_nodes is None) ^ (eval_return_nodes is None)), ( - "If any of `train_return_nodes` and `eval_return_nodes` are " "specified, then both should be specified" + "If any of `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified" ) assert (return_nodes is None) ^ (train_return_nodes is None), ( - "If `train_return_nodes` and `eval_return_nodes` are specified, " "then both should be specified" + "If `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified" ) # Put *_return_nodes into Dict[str, str] format diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py index 9e4e4107e4f..a998c00a015 100644 --- a/torchvision/models/googlenet.py +++ b/torchvision/models/googlenet.py @@ -44,7 +44,7 @@ def googlenet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> kwargs["aux_logits"] = False if kwargs["aux_logits"]: warnings.warn( - "auxiliary heads in the pretrained googlenet model are NOT pretrained, " "so make sure to train them" + "auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them" ) original_aux_logits = kwargs["aux_logits"] kwargs["aux_logits"] = True diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py index d585a1cd4ad..440819a2ceb 100644 --- a/torchvision/models/quantization/googlenet.py +++ b/torchvision/models/quantization/googlenet.py @@ -49,7 +49,7 @@ def googlenet( kwargs["aux_logits"] = False if kwargs["aux_logits"]: warnings.warn( - "auxiliary heads in the pretrained googlenet model are NOT pretrained, " "so make sure to train them" + "auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them" ) original_aux_logits = kwargs["aux_logits"] kwargs["aux_logits"] = True diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py index ef499c696bb..63cf18039f8 100644 --- a/torchvision/models/squeezenet.py +++ b/torchvision/models/squeezenet.py @@ -72,7 +72,7 @@ def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float # FIXME: Is this needed? 
SqueezeNet should only be called from the # FIXME: squeezenet1_x() functions # FIXME: This checking is not done for the other models - raise ValueError("Unsupported SqueezeNet version {version}:" "1_0 or 1_1 expected".format(version=version)) + raise ValueError("Unsupported SqueezeNet version {version}:1_0 or 1_1 expected".format(version=version)) # Final convolution is initialized differently from the rest final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1) diff --git a/torchvision/ops/_register_onnx_ops.py b/torchvision/ops/_register_onnx_ops.py index 124b0d75624..0fdbeecdfb9 100644 --- a/torchvision/ops/_register_onnx_ops.py +++ b/torchvision/ops/_register_onnx_ops.py @@ -37,7 +37,7 @@ def roi_align(g, input, rois, spatial_scale, pooled_height, pooled_width, sampli # ONNX doesn't support negative sampling_ratio if sampling_ratio < 0: warnings.warn( - "ONNX doesn't support negative sampling ratio," "therefore is is set to 0 in order to be exported." + "ONNX doesn't support negative sampling ratio, therefore is is set to 0 in order to be exported." ) sampling_ratio = 0 return g.op( diff --git a/torchvision/transforms/functional_pil.py b/torchvision/transforms/functional_pil.py index 9a80706dc30..08494a8f63d 100644 --- a/torchvision/transforms/functional_pil.py +++ b/torchvision/transforms/functional_pil.py @@ -281,7 +281,7 @@ def _parse_fill( fill = tuple([fill] * num_bands) if isinstance(fill, (list, tuple)): if len(fill) != num_bands: - msg = "The number of elements in 'fill' does not match the number of " "bands of the image ({} != {})" + msg = "The number of elements in 'fill' does not match the number of bands of the image ({} != {})" raise ValueError(msg.format(len(fill), num_bands)) fill = tuple(fill) diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py index 5c0fe450a32..13041ed196b 100644 --- a/torchvision/transforms/functional_tensor.py +++ b/torchvision/transforms/functional_tensor.py @@ -246,7 +246,7 @@ def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor: def center_crop(img: Tensor, output_size: BroadcastingList2[int]) -> Tensor: """DEPRECATED""" warnings.warn( - "This method is deprecated and will be removed in future releases. " "Please, use ``F.center_crop`` instead." + "This method is deprecated and will be removed in future releases. Please, use ``F.center_crop`` instead." ) _assert_image_tensor(img) @@ -268,7 +268,7 @@ def center_crop(img: Tensor, output_size: BroadcastingList2[int]) -> Tensor: def five_crop(img: Tensor, size: BroadcastingList2[int]) -> List[Tensor]: """DEPRECATED""" warnings.warn( - "This method is deprecated and will be removed in future releases. " "Please, use ``F.five_crop`` instead." + "This method is deprecated and will be removed in future releases. Please, use ``F.five_crop`` instead." ) _assert_image_tensor(img) @@ -293,7 +293,7 @@ def five_crop(img: Tensor, size: BroadcastingList2[int]) -> List[Tensor]: def ten_crop(img: Tensor, size: BroadcastingList2[int], vertical_flip: bool = False) -> List[Tensor]: """DEPRECATED""" warnings.warn( - "This method is deprecated and will be removed in future releases. " "Please, use ``F.ten_crop`` instead." + "This method is deprecated and will be removed in future releases. Please, use ``F.ten_crop`` instead." 
) _assert_image_tensor(img) @@ -501,7 +501,7 @@ def resize( if isinstance(size, list): if len(size) not in [1, 2]: raise ValueError( - "Size must be an int or a 1 or 2 element tuple/list, not a " "{} element tuple/list".format(len(size)) + "Size must be an int or a 1 or 2 element tuple/list, not a {} element tuple/list".format(len(size)) ) if max_size is not None and len(size) != 1: raise ValueError( From 2754a64229a85d82c65d764319db26037151e2a7 Mon Sep 17 00:00:00 2001 From: Jirka Date: Mon, 11 Oct 2021 14:12:01 +0200 Subject: [PATCH 06/17] fix tuple --- torchvision/transforms/functional.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index 44abc4de6cf..b97123bcb00 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -921,7 +921,8 @@ def _get_inverse_affine_matrix( # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1 rot = math.radians(angle) - sx, sy = (math.radians(s) for s in shear) + sx = math.radians(shear[0]) + sy = math.radians(shear[1]) cx, cy = center tx, ty = translate From e4a28dff388969a492dc9247a1bf848f7f01066a Mon Sep 17 00:00:00 2001 From: Jirka Date: Mon, 11 Oct 2021 14:22:49 +0200 Subject: [PATCH 07/17] apply --- packaging/wheel/relocate.py | 8 ++------ references/detection/group_by_aspect_ratio.py | 4 +--- torchvision/datasets/samplers/clip_sampler.py | 4 ++-- torchvision/datasets/video_utils.py | 2 +- torchvision/io/video.py | 4 +--- .../models/detection/generalized_rcnn.py | 6 ++---- torchvision/models/detection/retinanet.py | 6 ++---- torchvision/models/detection/ssd.py | 6 ++---- torchvision/models/detection/transform.py | 4 +--- torchvision/models/feature_extraction.py | 18 +++++++++--------- torchvision/models/squeezenet.py | 2 +- torchvision/transforms/functional_tensor.py | 2 +- 12 files changed, 25 insertions(+), 41 deletions(-) diff --git a/packaging/wheel/relocate.py b/packaging/wheel/relocate.py index 027dddb0dcd..e6a4ef9d458 100644 --- a/packaging/wheel/relocate.py +++ b/packaging/wheel/relocate.py @@ -315,9 +315,7 @@ def patch_linux(): # Get patchelf location patchelf = find_program("patchelf") if patchelf is None: - raise FileNotFoundError( - "Patchelf was not found in the system, please make sure that is available on the PATH." - ) + raise FileNotFoundError("Patchelf was not found in the system, please make sure that is available on the PATH.") # Find wheel print("Finding wheels...") @@ -353,9 +351,7 @@ def patch_win(): # Get dumpbin location dumpbin = find_program("dumpbin") if dumpbin is None: - raise FileNotFoundError( - "Dumpbin was not found in the system, please make sure that is available on the PATH." 
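A note on the "fix tuple" hunk in torchvision/transforms/functional.py above: it replaces unpacking of a generator expression with two explicit assignments, presumably because this helper is compiled with TorchScript, which does not accept that construct; that motivation is an assumption, since the commit message only says "fix tuple". A minimal sketch of the two forms, with made-up shear values:

    import math

    shear = (10.0, 5.0)

    # Plain-Python form removed by the patch: unpack a generator expression.
    sx, sy = (math.radians(s) for s in shear)

    # Form introduced by the patch: index explicitly, which scripting tools can compile.
    sx = math.radians(shear[0])
    sy = math.radians(shear[1])
    print(round(sx, 4), round(sy, 4))  # 0.1745 0.0873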
- ) + raise FileNotFoundError("Dumpbin was not found in the system, please make sure that is available on the PATH.") # Find wheel print("Finding wheels...") diff --git a/references/detection/group_by_aspect_ratio.py b/references/detection/group_by_aspect_ratio.py index 728c28da3a8..1323849a6a4 100644 --- a/references/detection/group_by_aspect_ratio.py +++ b/references/detection/group_by_aspect_ratio.py @@ -36,9 +36,7 @@ class GroupedBatchSampler(BatchSampler): def __init__(self, sampler, group_ids, batch_size): if not isinstance(sampler, Sampler): - raise ValueError( - "sampler should be an instance of torch.utils.data.Sampler, but got sampler={}".format(sampler) - ) + raise ValueError(f"sampler should be an instance of torch.utils.data.Sampler, but got sampler={sampler}") self.sampler = sampler self.group_ids = group_ids self.batch_size = batch_size diff --git a/torchvision/datasets/samplers/clip_sampler.py b/torchvision/datasets/samplers/clip_sampler.py index e63a48389b9..ad7427f1949 100644 --- a/torchvision/datasets/samplers/clip_sampler.py +++ b/torchvision/datasets/samplers/clip_sampler.py @@ -117,7 +117,7 @@ class UniformClipSampler(Sampler): def __init__(self, video_clips: VideoClips, num_clips_per_video: int) -> None: if not isinstance(video_clips, VideoClips): - raise TypeError("Expected video_clips to be an instance of VideoClips, got {}".format(type(video_clips))) + raise TypeError(f"Expected video_clips to be an instance of VideoClips, got {type(video_clips)}") self.video_clips = video_clips self.num_clips_per_video = num_clips_per_video @@ -151,7 +151,7 @@ class RandomClipSampler(Sampler): def __init__(self, video_clips: VideoClips, max_clips_per_video: int) -> None: if not isinstance(video_clips, VideoClips): - raise TypeError("Expected video_clips to be an instance of VideoClips, got {}".format(type(video_clips))) + raise TypeError(f"Expected video_clips to be an instance of VideoClips, got {type(video_clips)}") self.video_clips = video_clips self.max_clips_per_video = max_clips_per_video diff --git a/torchvision/datasets/video_utils.py b/torchvision/datasets/video_utils.py index 573a53cccfc..f0f19e332ed 100644 --- a/torchvision/datasets/video_utils.py +++ b/torchvision/datasets/video_utils.py @@ -293,7 +293,7 @@ def get_clip(self, idx): video_idx (int): index of the video in `video_paths` """ if idx >= self.num_clips(): - raise IndexError("Index {} out of range ({} number of clips)".format(idx, self.num_clips())) + raise IndexError(f"Index {idx} out of range ({self.num_clips()} number of clips)") video_idx, clip_idx = self.get_clip_location(idx) video_path = self.video_paths[video_idx] clip_pts = self.clips[video_idx][clip_idx] diff --git a/torchvision/io/video.py b/torchvision/io/video.py index 61eda488b70..0ddd60a4586 100644 --- a/torchvision/io/video.py +++ b/torchvision/io/video.py @@ -271,9 +271,7 @@ def read_video( end_pts = float("inf") if end_pts < start_pts: - raise ValueError( - "end_pts should be larger than start_pts, got start_pts={} and end_pts={}".format(start_pts, end_pts) - ) + raise ValueError(f"end_pts should be larger than start_pts, got start_pts={start_pts} and end_pts={end_pts}") info = {} video_frames = [] diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index e080d2107f3..1bb4fa2952f 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -62,11 +62,9 @@ def forward(self, images, targets=None): boxes = target["boxes"] if 
isinstance(boxes, torch.Tensor): if len(boxes.shape) != 2 or boxes.shape[-1] != 4: - raise ValueError( - "Expected target boxes to be a tensor of shape [N, 4], got {:}.".format(boxes.shape) - ) + raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.") else: - raise ValueError("Expected target boxes to be of type Tensor, got {:}.".format(type(boxes))) + raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.") original_image_sizes: List[Tuple[int, int]] = [] for img in images: diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index a3695247b77..60dc620e909 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -489,11 +489,9 @@ def forward(self, images, targets=None): boxes = target["boxes"] if isinstance(boxes, torch.Tensor): if len(boxes.shape) != 2 or boxes.shape[-1] != 4: - raise ValueError( - "Expected target boxes to be a tensor of shape [N, 4], got {:}.".format(boxes.shape) - ) + raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.") else: - raise ValueError("Expected target boxes to be of type Tensor, got {:}.".format(type(boxes))) + raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.") # get the original image sizes original_image_sizes: List[Tuple[int, int]] = [] diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 0c92f82fd98..2ea15d71a0d 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -310,11 +310,9 @@ def forward( boxes = target["boxes"] if isinstance(boxes, torch.Tensor): if len(boxes.shape) != 2 or boxes.shape[-1] != 4: - raise ValueError( - "Expected target boxes to be a tensor of shape [N, 4], got {:}.".format(boxes.shape) - ) + raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.") else: - raise ValueError("Expected target boxes to be of type Tensor, got {:}.".format(type(boxes))) + raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.") # get the original image sizes original_image_sizes: List[Tuple[int, int]] = [] diff --git a/torchvision/models/detection/transform.py b/torchvision/models/detection/transform.py index 59790c32aa0..53b276887cf 100644 --- a/torchvision/models/detection/transform.py +++ b/torchvision/models/detection/transform.py @@ -123,9 +123,7 @@ def forward( target_index = targets[i] if targets is not None else None if image.dim() != 3: - raise ValueError( - "images is expected to be a list of 3d tensors of shape [C, H, W], got {}".format(image.shape) - ) + raise ValueError(f"images is expected to be a list of 3d tensors of shape [C, H, W], got {image.shape}") image = self.normalize(image) image, target_index = self.resize(image, target_index) images[i] = image diff --git a/torchvision/models/feature_extraction.py b/torchvision/models/feature_extraction.py index b472db1692c..0095f21f62b 100644 --- a/torchvision/models/feature_extraction.py +++ b/torchvision/models/feature_extraction.py @@ -399,17 +399,17 @@ def create_feature_extractor( """ is_training = model.training - assert any(arg is not None for arg in [return_nodes, train_return_nodes, eval_return_nodes]), ( - "Either `return_nodes` or `train_return_nodes` and `eval_return_nodes` together, should be specified" - ) + assert any( + arg is not None for arg in [return_nodes, train_return_nodes, eval_return_nodes] + ), 
"Either `return_nodes` or `train_return_nodes` and `eval_return_nodes` together, should be specified" - assert not ((train_return_nodes is None) ^ (eval_return_nodes is None)), ( - "If any of `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified" - ) + assert not ( + (train_return_nodes is None) ^ (eval_return_nodes is None) + ), "If any of `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified" - assert (return_nodes is None) ^ (train_return_nodes is None), ( - "If `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified" - ) + assert (return_nodes is None) ^ ( + train_return_nodes is None + ), "If `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified" # Put *_return_nodes into Dict[str, str] format def to_strdict(n) -> Dict[str, str]: diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py index 63cf18039f8..db0fe28fd7e 100644 --- a/torchvision/models/squeezenet.py +++ b/torchvision/models/squeezenet.py @@ -72,7 +72,7 @@ def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float # FIXME: Is this needed? SqueezeNet should only be called from the # FIXME: squeezenet1_x() functions # FIXME: This checking is not done for the other models - raise ValueError("Unsupported SqueezeNet version {version}:1_0 or 1_1 expected".format(version=version)) + raise ValueError(f"Unsupported SqueezeNet version {version}:1_0 or 1_1 expected") # Final convolution is initialized differently from the rest final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1) diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py index 13041ed196b..91be14b0023 100644 --- a/torchvision/transforms/functional_tensor.py +++ b/torchvision/transforms/functional_tensor.py @@ -501,7 +501,7 @@ def resize( if isinstance(size, list): if len(size) not in [1, 2]: raise ValueError( - "Size must be an int or a 1 or 2 element tuple/list, not a {} element tuple/list".format(len(size)) + f"Size must be an int or a 1 or 2 element tuple/list, not a {len(size)} element tuple/list" ) if max_size is not None and len(size) != 1: raise ValueError( From 2a06e9c228ad54aba20710f55d57cb274719cde5 Mon Sep 17 00:00:00 2001 From: Jirka Date: Mon, 11 Oct 2021 14:27:47 +0200 Subject: [PATCH 08/17] fix --- test/test_functional_tensor.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py index 48f2a29e767..7b3f2566ee5 100644 --- a/test/test_functional_tensor.py +++ b/test/test_functional_tensor.py @@ -111,9 +111,9 @@ def test_rotate(self, device, height, width, center, dt, angle, expand, fill, fn if out_tensor.dtype != torch.uint8: out_tensor = out_tensor.to(torch.uint8) - assert out_tensor.shape == out_pil_tensor.shape, ( - f"{(height, width, NEAREST, dt, angle, expand, center)}: " f"{out_tensor.shape} vs {out_pil_tensor.shape}" - ) + assert ( + out_tensor.shape == out_pil_tensor.shape + ), f"{(height, width, NEAREST, dt, angle, expand, center)}: {out_tensor.shape} vs {out_pil_tensor.shape}" num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0 ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2] From ea8265ec1236d577bf111d6f44de3d4f9167468c Mon Sep 17 00:00:00 2001 From: Jirka Date: Mon, 11 Oct 2021 20:44:03 +0200 Subject: [PATCH 09/17] fix " + " --- test/test_transforms.py | 2 +- 
torchvision/datasets/caltech.py | 4 ++-- torchvision/datasets/celeba.py | 2 +- torchvision/datasets/cifar.py | 6 ++---- torchvision/datasets/inaturalist.py | 2 +- torchvision/datasets/lfw.py | 2 +- torchvision/datasets/mnist.py | 2 +- torchvision/datasets/omniglot.py | 2 +- torchvision/datasets/sbd.py | 2 +- torchvision/datasets/sbu.py | 2 +- torchvision/datasets/semeion.py | 2 +- torchvision/datasets/svhn.py | 2 +- torchvision/datasets/widerface.py | 4 +--- torchvision/models/quantization/googlenet.py | 2 +- torchvision/models/quantization/inception.py | 2 +- torchvision/transforms/functional.py | 4 ++-- torchvision/transforms/functional_pil.py | 4 +--- torchvision/transforms/functional_tensor.py | 7 +++---- torchvision/transforms/transforms.py | 8 ++------ 19 files changed, 25 insertions(+), 36 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index b4bfe963c0e..4ae9f2c8f7c 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -2211,7 +2211,7 @@ def _test_transformation(self, angle, translate, scale, shear, pil_image, input_ n_diff_pixels = np.sum(np_result != true_result) / 3 # Accept 3 wrong pixels error_msg = ( - f"angle={angle}, translate={translate}, scale={scale}, shear={shear}\n" + f"n diff pixels={n_diff_pixels}\n" + f"angle={angle}, translate={translate}, scale={scale}, shear={shear}\nn diff pixels={n_diff_pixels}\n" ) assert n_diff_pixels < 3, error_msg diff --git a/torchvision/datasets/caltech.py b/torchvision/datasets/caltech.py index 38f086fd04d..8eec18c02cb 100644 --- a/torchvision/datasets/caltech.py +++ b/torchvision/datasets/caltech.py @@ -50,7 +50,7 @@ def __init__( self.download() if not self._check_integrity(): - raise RuntimeError("Dataset not found or corrupted." + " You can use download=True to download it") + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") self.categories = sorted(os.listdir(os.path.join(self.root, "101_ObjectCategories"))) self.categories.remove("BACKGROUND_Google") # this is not a real class @@ -172,7 +172,7 @@ def __init__( self.download() if not self._check_integrity(): - raise RuntimeError("Dataset not found or corrupted." + " You can use download=True to download it") + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") self.categories = sorted(os.listdir(os.path.join(self.root, "256_ObjectCategories"))) self.index: List[int] = [] diff --git a/torchvision/datasets/celeba.py b/torchvision/datasets/celeba.py index 327e862ea5e..69abc3db957 100644 --- a/torchvision/datasets/celeba.py +++ b/torchvision/datasets/celeba.py @@ -80,7 +80,7 @@ def __init__( self.download() if not self._check_integrity(): - raise RuntimeError("Dataset not found or corrupted." + " You can use download=True to download it") + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") split_map = { "train": 0, diff --git a/torchvision/datasets/cifar.py b/torchvision/datasets/cifar.py index deabd445d22..7f57e42bc19 100644 --- a/torchvision/datasets/cifar.py +++ b/torchvision/datasets/cifar.py @@ -66,7 +66,7 @@ def __init__( self.download() if not self._check_integrity(): - raise RuntimeError("Dataset not found or corrupted." + " You can use download=True to download it") + raise RuntimeError("Dataset not found or corrupted. 
You can use download=True to download it") if self.train: downloaded_list = self.train_list @@ -95,9 +95,7 @@ def __init__( def _load_meta(self) -> None: path = os.path.join(self.root, self.base_folder, self.meta["filename"]) if not check_integrity(path, self.meta["md5"]): - raise RuntimeError( - "Dataset metadata file not found or corrupted." + " You can use download=True to download it" - ) + raise RuntimeError("Dataset metadata file not found or corrupted. You can use download=True to download it") with open(path, "rb") as infile: data = pickle.load(infile, encoding="latin1") self.classes = data[self.meta["key"]] diff --git a/torchvision/datasets/inaturalist.py b/torchvision/datasets/inaturalist.py index 2191c3f487e..b409cc79f08 100644 --- a/torchvision/datasets/inaturalist.py +++ b/torchvision/datasets/inaturalist.py @@ -81,7 +81,7 @@ def __init__( self.download() if not self._check_integrity(): - raise RuntimeError("Dataset not found or corrupted." + " You can use download=True to download it") + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") self.all_categories: List[str] = [] diff --git a/torchvision/datasets/lfw.py b/torchvision/datasets/lfw.py index 6720c29ae9d..a25765d5725 100644 --- a/torchvision/datasets/lfw.py +++ b/torchvision/datasets/lfw.py @@ -53,7 +53,7 @@ def __init__( self.download() if not self._check_integrity(): - raise RuntimeError("Dataset not found or corrupted." + " You can use download=True to download it") + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") self.images_dir = os.path.join(self.root, images_dir) diff --git a/torchvision/datasets/mnist.py b/torchvision/datasets/mnist.py index 5aac1cebca8..2916f594471 100644 --- a/torchvision/datasets/mnist.py +++ b/torchvision/datasets/mnist.py @@ -98,7 +98,7 @@ def __init__( self.download() if not self._check_exists(): - raise RuntimeError("Dataset not found." + " You can use download=True to download it") + raise RuntimeError("Dataset not found. You can use download=True to download it") self.data, self.targets = self._load_data() diff --git a/torchvision/datasets/omniglot.py b/torchvision/datasets/omniglot.py index 1607b7b3060..5a09d61ccca 100644 --- a/torchvision/datasets/omniglot.py +++ b/torchvision/datasets/omniglot.py @@ -46,7 +46,7 @@ def __init__( self.download() if not self._check_integrity(): - raise RuntimeError("Dataset not found or corrupted." + " You can use download=True to download it") + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") self.target_folder = join(self.root, self._get_target_folder()) self._alphabets = list_dir(self.target_folder) diff --git a/torchvision/datasets/sbd.py b/torchvision/datasets/sbd.py index 1ec22a551d0..ce485680910 100644 --- a/torchvision/datasets/sbd.py +++ b/torchvision/datasets/sbd.py @@ -83,7 +83,7 @@ def __init__( download_url(self.voc_train_url, sbd_root, self.voc_split_filename, self.voc_split_md5) if not os.path.isdir(sbd_root): - raise RuntimeError("Dataset not found or corrupted." + " You can use download=True to download it") + raise RuntimeError("Dataset not found or corrupted. 
You can use download=True to download it") split_f = os.path.join(sbd_root, image_set.rstrip("\n") + ".txt") diff --git a/torchvision/datasets/sbu.py b/torchvision/datasets/sbu.py index 9e4cb7a94eb..cd483a46190 100644 --- a/torchvision/datasets/sbu.py +++ b/torchvision/datasets/sbu.py @@ -39,7 +39,7 @@ def __init__( self.download() if not self._check_integrity(): - raise RuntimeError("Dataset not found or corrupted." + " You can use download=True to download it") + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") # Read the caption for each photo self.photos = [] diff --git a/torchvision/datasets/semeion.py b/torchvision/datasets/semeion.py index b4407b779d2..23c25a11540 100644 --- a/torchvision/datasets/semeion.py +++ b/torchvision/datasets/semeion.py @@ -41,7 +41,7 @@ def __init__( self.download() if not self._check_integrity(): - raise RuntimeError("Dataset not found or corrupted." + " You can use download=True to download it") + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") fp = os.path.join(self.root, self.filename) data = np.loadtxt(fp) diff --git a/torchvision/datasets/svhn.py b/torchvision/datasets/svhn.py index 444bc59ca28..faefd77cc43 100644 --- a/torchvision/datasets/svhn.py +++ b/torchvision/datasets/svhn.py @@ -70,7 +70,7 @@ def __init__( self.download() if not self._check_integrity(): - raise RuntimeError("Dataset not found or corrupted." + " You can use download=True to download it") + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") # import here rather than at top of file because this is # an optional dependency for torchvision diff --git a/torchvision/datasets/widerface.py b/torchvision/datasets/widerface.py index dd5f7191255..b977c43ed6b 100644 --- a/torchvision/datasets/widerface.py +++ b/torchvision/datasets/widerface.py @@ -72,9 +72,7 @@ def __init__( self.download() if not self._check_integrity(): - raise RuntimeError( - "Dataset not found or corrupted. " + "You can use download=True to download and prepare it" - ) + raise RuntimeError("Dataset not found or corrupted. 
You can use download=True to download and prepare it") self.img_info: List[Dict[str, Union[str, Dict[str, torch.Tensor]]]] = [] if self.split in ("train", "val"): diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py index 440819a2ceb..d25bf6f3e4d 100644 --- a/torchvision/models/quantization/googlenet.py +++ b/torchvision/models/quantization/googlenet.py @@ -67,7 +67,7 @@ def googlenet( if pretrained: if quantize: - model_url = quant_model_urls["googlenet" + "_" + backend] + model_url = quant_model_urls["googlenet_" + backend] else: model_url = model_urls["googlenet"] diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py index a161c62fb2e..f9c2f351af1 100644 --- a/torchvision/models/quantization/inception.py +++ b/torchvision/models/quantization/inception.py @@ -75,7 +75,7 @@ def inception_v3( if not original_aux_logits: model.aux_logits = False model.AuxLogits = None - model_url = quant_model_urls["inception_v3_google" + "_" + backend] + model_url = quant_model_urls["inception_v3_google_" + backend] else: model_url = inception_module.model_urls["inception_v3_google"] diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index b97123bcb00..37c7e99efdd 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -259,7 +259,7 @@ def to_pil_image(pic, mode=None): npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0)) if not isinstance(npimg, np.ndarray): - raise TypeError("Input pic must be a torch.Tensor or NumPy ndarray, " + f"not {type(npimg)}") + raise TypeError("Input pic must be a torch.Tensor or NumPy ndarray, not {type(npimg)}") if npimg.shape[2] == 1: expected_mode = None @@ -423,7 +423,7 @@ def resize( def scale(*args, **kwargs): - warnings.warn("The use of the transforms.Scale transform is deprecated, " + "please use transforms.Resize instead.") + warnings.warn("The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.") return resize(*args, **kwargs) diff --git a/torchvision/transforms/functional_pil.py b/torchvision/transforms/functional_pil.py index 08494a8f63d..fdaf5f7de1a 100644 --- a/torchvision/transforms/functional_pil.py +++ b/torchvision/transforms/functional_pil.py @@ -147,9 +147,7 @@ def pad( padding = tuple(padding) if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]: - raise ValueError( - "Padding must be an int or a 1, 2, or 4 element tuple, not a " + f"{len(padding)} element tuple" - ) + raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple") if isinstance(padding, tuple) and len(padding) == 1: # Compatibility with `functional_tensor.pad` diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py index 91be14b0023..09ae726931c 100644 --- a/torchvision/transforms/functional_tensor.py +++ b/torchvision/transforms/functional_tensor.py @@ -382,7 +382,8 @@ def _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor: # crop if needed if padding[0] < 0 or padding[1] < 0 or padding[2] < 0 or padding[3] < 0: - crop_left, crop_right, crop_top, crop_bottom = (-min(x, 0) for x in padding) + neg_min_padding = [-min(x, 0) for x in padding] + crop_left, crop_right, crop_top, crop_bottom = neg_min_padding img = img[..., crop_top : img.shape[-2] - crop_bottom, crop_left : img.shape[-1] - crop_right] padding = [max(x, 0) for x in padding] @@ -421,9 +422,7 @@ def pad(img: 
Tensor, padding: List[int], fill: int = 0, padding_mode: str = "con padding = list(padding) if isinstance(padding, list) and len(padding) not in [1, 2, 4]: - raise ValueError( - "Padding must be an int or a 1, 2, or 4 element tuple, not a " + f"{len(padding)} element tuple" - ) + raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple") if padding_mode not in ["constant", "edge", "reflect", "symmetric"]: raise ValueError("Padding mode should be either constant, edge, reflect or symmetric") diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index 54ecca433e1..1f3d8e23ca7 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -350,9 +350,7 @@ class Scale(Resize): """ def __init__(self, *args, **kwargs): - warnings.warn( - "The use of the transforms.Scale transform is deprecated, " + "please use transforms.Resize instead." - ) + warnings.warn("The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.") super().__init__(*args, **kwargs) @@ -436,9 +434,7 @@ def __init__(self, padding, fill=0, padding_mode="constant"): raise ValueError("Padding mode should be either constant, edge, reflect or symmetric") if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]: - raise ValueError( - "Padding must be an int or a 1, 2, or 4 element tuple, not a " + f"{len(padding)} element tuple" - ) + raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple") self.padding = padding self.fill = fill From 1bec901f902c48ea184d6c7f97fdbab7a0ecb920 Mon Sep 17 00:00:00 2001 From: Jirka Date: Mon, 11 Oct 2021 21:07:54 +0200 Subject: [PATCH 10/17] ff --- torchvision/models/detection/anchor_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/torchvision/models/detection/anchor_utils.py b/torchvision/models/detection/anchor_utils.py index 39ffc6c4a9b..bff285bc567 100644 --- a/torchvision/models/detection/anchor_utils.py +++ b/torchvision/models/detection/anchor_utils.py @@ -216,7 +216,8 @@ def _grid_default_boxes( for k, f_k in enumerate(grid_sizes): # Now add the default boxes for each width-height pair if self.steps is not None: - x_f_k, y_f_k = (img_shape / self.steps[k] for img_shape in image_size) + x_f_k = image_size[0] / self.steps[k] + y_f_k = image_size[1] / self.steps[k] else: y_f_k, x_f_k = f_k From 2e785dfdba58bfd4655cf2d50905d1d6491c4b03 Mon Sep 17 00:00:00 2001 From: Jirka Date: Tue, 12 Oct 2021 23:55:17 +0200 Subject: [PATCH 11/17] elif --- references/classification/transforms.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/references/classification/transforms.py b/references/classification/transforms.py index 400830c1188..69ee4182c54 100644 --- a/references/classification/transforms.py +++ b/references/classification/transforms.py @@ -40,11 +40,11 @@ def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]: """ if batch.ndim != 4: raise ValueError(f"Batch ndim should be 4. Got {batch.ndim}") - elif target.ndim != 1: + if target.ndim != 1: raise ValueError(f"Target ndim should be 1. Got {target.ndim}") - elif not batch.is_floating_point(): + if not batch.is_floating_point(): raise TypeError(f"Batch dtype should be a float tensor. Got {batch.dtype}.") - elif target.dtype != torch.int64: + if target.dtype != torch.int64: raise TypeError(f"Target dtype should be torch.int64. 
Got {target.dtype}") if not self.inplace: @@ -116,11 +116,11 @@ def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]: """ if batch.ndim != 4: raise ValueError(f"Batch ndim should be 4. Got {batch.ndim}") - elif target.ndim != 1: + if target.ndim != 1: raise ValueError(f"Target ndim should be 1. Got {target.ndim}") - elif not batch.is_floating_point(): + if not batch.is_floating_point(): raise TypeError(f"Batch dtype should be a float tensor. Got {batch.dtype}.") - elif target.dtype != torch.int64: + if target.dtype != torch.int64: raise TypeError(f"Target dtype should be torch.int64. Got {target.dtype}") if not self.inplace: From 8e3997748f6f36ce7245aa84ab1f7457c996d2d4 Mon Sep 17 00:00:00 2001 From: Jirka Date: Wed, 13 Oct 2021 15:12:40 +0200 Subject: [PATCH 12/17] pause hook --- .pre-commit-config.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4d7c783602c..fcead114e19 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -8,12 +8,12 @@ repos: exclude: packaging/.* - id: end-of-file-fixer - - repo: https://github.com/asottile/pyupgrade - rev: v2.29.0 - hooks: - - id: pyupgrade - args: [--py36-plus] - name: Upgrade code + #- repo: https://github.com/asottile/pyupgrade + # rev: v2.29.0 + # hooks: + # - id: pyupgrade + # args: [--py36-plus] + # name: Upgrade code - repo: https://github.com/omnilib/ufmt rev: v1.3.0 From 439cbb913b793f38756b0d8bf25d556dee790942 Mon Sep 17 00:00:00 2001 From: Jirka Date: Tue, 26 Oct 2021 18:05:15 +0200 Subject: [PATCH 13/17] fixing --- references/classification/train.py | 14 +++++++------- scripts/release_notes/classify_prs.py | 2 +- scripts/release_notes/retrieve_prs_data.py | 4 ++-- torchvision/models/alexnet.py | 2 +- torchvision/models/densenet.py | 8 ++++---- torchvision/models/detection/backbone_utils.py | 2 +- torchvision/models/detection/generalized_rcnn.py | 2 +- torchvision/models/detection/rpn.py | 4 ++-- torchvision/models/googlenet.py | 8 ++++---- torchvision/models/inception.py | 16 ++++++++-------- torchvision/models/mnasnet.py | 8 ++++---- torchvision/models/mobilenetv2.py | 4 ++-- torchvision/models/resnet.py | 6 +++--- torchvision/models/segmentation/_utils.py | 2 +- torchvision/models/segmentation/fcn.py | 2 +- torchvision/models/shufflenetv2.py | 8 ++++---- torchvision/models/squeezenet.py | 4 ++-- torchvision/models/vgg.py | 2 +- torchvision/models/video/resnet.py | 16 ++++++++-------- torchvision/prototype/datasets/utils/_dataset.py | 2 +- 20 files changed, 58 insertions(+), 58 deletions(-) diff --git a/references/classification/train.py b/references/classification/train.py index c211e93524b..27ef12a83a2 100644 --- a/references/classification/train.py +++ b/references/classification/train.py @@ -26,7 +26,7 @@ def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, arg metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}")) metric_logger.add_meter("img/s", utils.SmoothedValue(window_size=10, fmt="{value}")) - header = "Epoch: [{}]".format(epoch) + header = f"Epoch: [{epoch}]" for i, (image, target) in enumerate(metric_logger.log_every(data_loader, args.print_freq, header)): start_time = time.time() image, target = image.to(device), target.to(device) @@ -121,7 +121,7 @@ def load_data(traindir, valdir, args): cache_path = _get_cache_path(traindir) if args.cache_dataset and os.path.exists(cache_path): # 
Attention, as the transforms are also cached! - print("Loading dataset_train from {}".format(cache_path)) + print(f"Loading dataset_train from {cache_path}") dataset, _ = torch.load(cache_path) else: auto_augment_policy = getattr(args, "auto_augment", None) @@ -136,7 +136,7 @@ def load_data(traindir, valdir, args): ), ) if args.cache_dataset: - print("Saving dataset_train to {}".format(cache_path)) + print(f"Saving dataset_train to {cache_path}") utils.mkdir(os.path.dirname(cache_path)) utils.save_on_master((dataset, traindir), cache_path) print("Took", time.time() - st) @@ -145,7 +145,7 @@ def load_data(traindir, valdir, args): cache_path = _get_cache_path(valdir) if args.cache_dataset and os.path.exists(cache_path): # Attention, as the transforms are also cached! - print("Loading dataset_test from {}".format(cache_path)) + print(f"Loading dataset_test from {cache_path}") dataset_test, _ = torch.load(cache_path) else: if not args.weights: @@ -162,7 +162,7 @@ def load_data(traindir, valdir, args): preprocessing, ) if args.cache_dataset: - print("Saving dataset_test to {}".format(cache_path)) + print(f"Saving dataset_test to {cache_path}") utils.mkdir(os.path.dirname(cache_path)) utils.save_on_master((dataset_test, valdir), cache_path) @@ -351,12 +351,12 @@ def main(args): } if model_ema: checkpoint["model_ema"] = model_ema.state_dict() - utils.save_on_master(checkpoint, os.path.join(args.output_dir, "model_{}.pth".format(epoch))) + utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth")) utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth")) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print("Training time {}".format(total_time_str)) + print(f"Training time {total_time_str}") def get_args_parser(add_help=True): diff --git a/scripts/release_notes/classify_prs.py b/scripts/release_notes/classify_prs.py index 580e93bfe8b..9b7870a3c51 100644 --- a/scripts/release_notes/classify_prs.py +++ b/scripts/release_notes/classify_prs.py @@ -14,7 +14,7 @@ # In[3]: -all_labels = set(lbl for labels in df["labels"] for lbl in labels) +all_labels = {lbl for labels in df["labels"] for lbl in labels} all_labels diff --git a/scripts/release_notes/retrieve_prs_data.py b/scripts/release_notes/retrieve_prs_data.py index 90cb4cda07e..fb64902a6af 100644 --- a/scripts/release_notes/retrieve_prs_data.py +++ b/scripts/release_notes/retrieve_prs_data.py @@ -96,7 +96,7 @@ def run_query(query): if request.status_code == 200: return request.json() else: - raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query)) + raise Exception(f"Query failed to run by returning code of {request.status_code}. 
{query}") def gh_labels(pr_number): @@ -151,7 +151,7 @@ def get(self, commit): return self.data[commit] def read_from_disk(self): - with open(self.path, "r") as f: + with open(self.path) as f: data = json.load(f) data = {commit: dict_to_features(dct) for commit, dct in data.items()} return data diff --git a/torchvision/models/alexnet.py b/torchvision/models/alexnet.py index fd735f4a7a4..bb812febdc4 100644 --- a/torchvision/models/alexnet.py +++ b/torchvision/models/alexnet.py @@ -17,7 +17,7 @@ class AlexNet(nn.Module): def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None: - super(AlexNet, self).__init__() + super().__init__() _log_api_usage_once(self) self.features = nn.Sequential( nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), diff --git a/torchvision/models/densenet.py b/torchvision/models/densenet.py index d9ff52e55ae..14e318360af 100644 --- a/torchvision/models/densenet.py +++ b/torchvision/models/densenet.py @@ -26,7 +26,7 @@ class _DenseLayer(nn.Module): def __init__( self, num_input_features: int, growth_rate: int, bn_size: int, drop_rate: float, memory_efficient: bool = False ) -> None: - super(_DenseLayer, self).__init__() + super().__init__() self.norm1: nn.BatchNorm2d self.add_module("norm1", nn.BatchNorm2d(num_input_features)) self.relu1: nn.ReLU @@ -107,7 +107,7 @@ def __init__( drop_rate: float, memory_efficient: bool = False, ) -> None: - super(_DenseBlock, self).__init__() + super().__init__() for i in range(num_layers): layer = _DenseLayer( num_input_features + i * growth_rate, @@ -128,7 +128,7 @@ def forward(self, init_features: Tensor) -> Tensor: class _Transition(nn.Sequential): def __init__(self, num_input_features: int, num_output_features: int) -> None: - super(_Transition, self).__init__() + super().__init__() self.add_module("norm", nn.BatchNorm2d(num_input_features)) self.add_module("relu", nn.ReLU(inplace=True)) self.add_module("conv", nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) @@ -162,7 +162,7 @@ def __init__( memory_efficient: bool = False, ) -> None: - super(DenseNet, self).__init__() + super().__init__() _log_api_usage_once(self) # First convolution diff --git a/torchvision/models/detection/backbone_utils.py b/torchvision/models/detection/backbone_utils.py index 1c2bceacda0..e12ee03c796 100644 --- a/torchvision/models/detection/backbone_utils.py +++ b/torchvision/models/detection/backbone_utils.py @@ -37,7 +37,7 @@ def __init__( out_channels: int, extra_blocks: Optional[ExtraFPNBlock] = None, ) -> None: - super(BackboneWithFPN, self).__init__() + super().__init__() if extra_blocks is None: extra_blocks = LastLevelMaxPool() diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index 1acfac29c7a..c9f552c126a 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -26,7 +26,7 @@ class GeneralizedRCNN(nn.Module): """ def __init__(self, backbone, rpn, roi_heads, transform): - super(GeneralizedRCNN, self).__init__() + super().__init__() _log_api_usage_once(self) self.transform = transform self.backbone = backbone diff --git a/torchvision/models/detection/rpn.py b/torchvision/models/detection/rpn.py index 4994eeba35b..15cec706fbb 100644 --- a/torchvision/models/detection/rpn.py +++ b/torchvision/models/detection/rpn.py @@ -34,7 +34,7 @@ class RPNHead(nn.Module): """ def __init__(self, in_channels: int, num_anchors: int) -> None: - super(RPNHead, self).__init__() + 
super().__init__() self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1) self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1, stride=1) @@ -132,7 +132,7 @@ def __init__( nms_thresh: float, score_thresh: float = 0.0, ) -> None: - super(RegionProposalNetwork, self).__init__() + super().__init__() self.anchor_generator = anchor_generator self.head = head self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0)) diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py index cfc3cf12309..f9d255aa655 100644 --- a/torchvision/models/googlenet.py +++ b/torchvision/models/googlenet.py @@ -75,7 +75,7 @@ def __init__( dropout: float = 0.2, dropout_aux: float = 0.7, ) -> None: - super(GoogLeNet, self).__init__() + super().__init__() _log_api_usage_once(self) if blocks is None: blocks = [BasicConv2d, Inception, InceptionAux] @@ -231,7 +231,7 @@ def __init__( pool_proj: int, conv_block: Optional[Callable[..., nn.Module]] = None, ) -> None: - super(Inception, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1) @@ -274,7 +274,7 @@ def __init__( conv_block: Optional[Callable[..., nn.Module]] = None, dropout: float = 0.7, ) -> None: - super(InceptionAux, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.conv = conv_block(in_channels, 128, kernel_size=1) @@ -303,7 +303,7 @@ def forward(self, x: Tensor) -> Tensor: class BasicConv2d(nn.Module): def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None: - super(BasicConv2d, self).__init__() + super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) self.bn = nn.BatchNorm2d(out_channels, eps=0.001) diff --git a/torchvision/models/inception.py b/torchvision/models/inception.py index a8b7cfc2b2f..d63f94119e1 100644 --- a/torchvision/models/inception.py +++ b/torchvision/models/inception.py @@ -73,7 +73,7 @@ def __init__( init_weights: Optional[bool] = None, dropout: float = 0.5, ) -> None: - super(Inception3, self).__init__() + super().__init__() _log_api_usage_once(self) if inception_blocks is None: inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux] @@ -214,7 +214,7 @@ class InceptionA(nn.Module): def __init__( self, in_channels: int, pool_features: int, conv_block: Optional[Callable[..., nn.Module]] = None ) -> None: - super(InceptionA, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) @@ -251,7 +251,7 @@ def forward(self, x: Tensor) -> Tensor: class InceptionB(nn.Module): def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None: - super(InceptionB, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) @@ -281,7 +281,7 @@ class InceptionC(nn.Module): def __init__( self, in_channels: int, channels_7x7: int, conv_block: Optional[Callable[..., nn.Module]] = None ) -> None: - super(InceptionC, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) @@ -325,7 +325,7 @@ def forward(self, x: Tensor) -> Tensor: class InceptionD(nn.Module): def 
__init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None: - super(InceptionD, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) @@ -356,7 +356,7 @@ def forward(self, x: Tensor) -> Tensor: class InceptionE(nn.Module): def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None: - super(InceptionE, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) @@ -405,7 +405,7 @@ class InceptionAux(nn.Module): def __init__( self, in_channels: int, num_classes: int, conv_block: Optional[Callable[..., nn.Module]] = None ) -> None: - super(InceptionAux, self).__init__() + super().__init__() if conv_block is None: conv_block = BasicConv2d self.conv0 = conv_block(in_channels, 128, kernel_size=1) @@ -434,7 +434,7 @@ def forward(self, x: Tensor) -> Tensor: class BasicConv2d(nn.Module): def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None: - super(BasicConv2d, self).__init__() + super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) self.bn = nn.BatchNorm2d(out_channels, eps=0.001) diff --git a/torchvision/models/mnasnet.py b/torchvision/models/mnasnet.py index 7be52b3f9be..5eb27904f90 100644 --- a/torchvision/models/mnasnet.py +++ b/torchvision/models/mnasnet.py @@ -26,7 +26,7 @@ class _InvertedResidual(nn.Module): def __init__( self, in_ch: int, out_ch: int, kernel_size: int, stride: int, expansion_factor: int, bn_momentum: float = 0.1 ) -> None: - super(_InvertedResidual, self).__init__() + super().__init__() assert stride in [1, 2] assert kernel_size in [3, 5] mid_ch = in_ch * expansion_factor @@ -97,7 +97,7 @@ class MNASNet(torch.nn.Module): _version = 2 def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None: - super(MNASNet, self).__init__() + super().__init__() _log_api_usage_once(self) assert alpha > 0.0 self.alpha = alpha @@ -193,14 +193,14 @@ def _load_from_state_dict( UserWarning, ) - super(MNASNet, self)._load_from_state_dict( + super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def _load_pretrained(model_name: str, model: nn.Module, progress: bool) -> None: if model_name not in _MODEL_URLS or _MODEL_URLS[model_name] is None: - raise ValueError("No checkpoint is available for model type {}".format(model_name)) + raise ValueError(f"No checkpoint is available for model type {model_name}") checkpoint_url = _MODEL_URLS[model_name] model.load_state_dict(load_state_dict_from_url(checkpoint_url, progress=progress)) diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py index 9ccbc229975..68ff85e3cf6 100644 --- a/torchvision/models/mobilenetv2.py +++ b/torchvision/models/mobilenetv2.py @@ -42,7 +42,7 @@ class InvertedResidual(nn.Module): def __init__( self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[..., nn.Module]] = None ) -> None: - super(InvertedResidual, self).__init__() + super().__init__() self.stride = stride assert stride in [1, 2] @@ -110,7 +110,7 @@ def __init__( dropout (float): The droupout probability """ - super(MobileNetV2, self).__init__() + super().__init__() _log_api_usage_once(self) if block is None: diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py index 
bfb23ab2b02..708f250be13 100644 --- a/torchvision/models/resnet.py +++ b/torchvision/models/resnet.py @@ -68,7 +68,7 @@ def __init__( dilation: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None, ) -> None: - super(BasicBlock, self).__init__() + super().__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: @@ -123,7 +123,7 @@ def __init__( dilation: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None, ) -> None: - super(Bottleneck, self).__init__() + super().__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d width = int(planes * (base_width / 64.0)) * groups @@ -173,7 +173,7 @@ def __init__( replace_stride_with_dilation: Optional[List[bool]] = None, norm_layer: Optional[Callable[..., nn.Module]] = None, ) -> None: - super(ResNet, self).__init__() + super().__init__() _log_api_usage_once(self) if norm_layer is None: norm_layer = nn.BatchNorm2d diff --git a/torchvision/models/segmentation/_utils.py b/torchvision/models/segmentation/_utils.py index 9a04af473ef..9780ef5b63c 100644 --- a/torchvision/models/segmentation/_utils.py +++ b/torchvision/models/segmentation/_utils.py @@ -38,6 +38,6 @@ def forward(self, x: Tensor) -> Dict[str, Tensor]: def _load_weights(arch: str, model: nn.Module, model_url: Optional[str], progress: bool) -> None: if model_url is None: - raise ValueError("No checkpoint is available for {}".format(arch)) + raise ValueError(f"No checkpoint is available for {arch}") state_dict = load_state_dict_from_url(model_url, progress=progress) model.load_state_dict(state_dict) diff --git a/torchvision/models/segmentation/fcn.py b/torchvision/models/segmentation/fcn.py index fe226be2ce1..73f69f86248 100644 --- a/torchvision/models/segmentation/fcn.py +++ b/torchvision/models/segmentation/fcn.py @@ -44,7 +44,7 @@ def __init__(self, in_channels: int, channels: int) -> None: nn.Conv2d(inter_channels, channels, 1), ] - super(FCNHead, self).__init__(*layers) + super().__init__(*layers) def _fcn_resnet( diff --git a/torchvision/models/shufflenetv2.py b/torchvision/models/shufflenetv2.py index dcfeb687dca..c2af51d8ecf 100644 --- a/torchvision/models/shufflenetv2.py +++ b/torchvision/models/shufflenetv2.py @@ -35,7 +35,7 @@ def channel_shuffle(x: Tensor, groups: int) -> Tensor: class InvertedResidual(nn.Module): def __init__(self, inp: int, oup: int, stride: int) -> None: - super(InvertedResidual, self).__init__() + super().__init__() if not (1 <= stride <= 3): raise ValueError("illegal stride value") @@ -99,7 +99,7 @@ def __init__( num_classes: int = 1000, inverted_residual: Callable[..., nn.Module] = InvertedResidual, ) -> None: - super(ShuffleNetV2, self).__init__() + super().__init__() _log_api_usage_once(self) if len(stages_repeats) != 3: @@ -123,7 +123,7 @@ def __init__( self.stage2: nn.Sequential self.stage3: nn.Sequential self.stage4: nn.Sequential - stage_names = ["stage{}".format(i) for i in [2, 3, 4]] + stage_names = [f"stage{i}" for i in [2, 3, 4]] for name, repeats, output_channels in zip(stage_names, stages_repeats, self._stage_out_channels[1:]): seq = [inverted_residual(input_channels, output_channels, 2)] for i in range(repeats - 1): @@ -162,7 +162,7 @@ def _shufflenetv2(arch: str, pretrained: bool, progress: bool, *args: Any, **kwa if pretrained: model_url = model_urls[arch] if model_url is None: - raise NotImplementedError("pretrained {} is not supported as of now".format(arch)) + raise NotImplementedError(f"pretrained {arch} is not supported as of now") else: state_dict = 
load_state_dict_from_url(model_url, progress=progress) model.load_state_dict(state_dict) diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py index ca564c52f54..029593af5ad 100644 --- a/torchvision/models/squeezenet.py +++ b/torchvision/models/squeezenet.py @@ -17,7 +17,7 @@ class Fire(nn.Module): def __init__(self, inplanes: int, squeeze_planes: int, expand1x1_planes: int, expand3x3_planes: int) -> None: - super(Fire, self).__init__() + super().__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) @@ -35,7 +35,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class SqueezeNet(nn.Module): def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float = 0.5) -> None: - super(SqueezeNet, self).__init__() + super().__init__() _log_api_usage_once(self) self.num_classes = num_classes if version == "1_0": diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py index ea815ade502..e31fc542ca6 100644 --- a/torchvision/models/vgg.py +++ b/torchvision/models/vgg.py @@ -36,7 +36,7 @@ class VGG(nn.Module): def __init__( self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True, dropout: float = 0.5 ) -> None: - super(VGG, self).__init__() + super().__init__() _log_api_usage_once(self) self.features = features self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) diff --git a/torchvision/models/video/resnet.py b/torchvision/models/video/resnet.py index e2e4eb420e7..58b5aedef06 100644 --- a/torchvision/models/video/resnet.py +++ b/torchvision/models/video/resnet.py @@ -20,7 +20,7 @@ def __init__( self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1 ) -> None: - super(Conv3DSimple, self).__init__( + super().__init__( in_channels=in_planes, out_channels=out_planes, kernel_size=(3, 3, 3), @@ -36,7 +36,7 @@ def get_downsample_stride(stride: int) -> Tuple[int, int, int]: class Conv2Plus1D(nn.Sequential): def __init__(self, in_planes: int, out_planes: int, midplanes: int, stride: int = 1, padding: int = 1) -> None: - super(Conv2Plus1D, self).__init__( + super().__init__( nn.Conv3d( in_planes, midplanes, @@ -62,7 +62,7 @@ def __init__( self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1 ) -> None: - super(Conv3DNoTemporal, self).__init__( + super().__init__( in_channels=in_planes, out_channels=out_planes, kernel_size=(1, 3, 3), @@ -90,7 +90,7 @@ def __init__( ) -> None: midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes) - super(BasicBlock, self).__init__() + super().__init__() self.conv1 = nn.Sequential( conv_builder(inplanes, planes, midplanes, stride), nn.BatchNorm3d(planes), nn.ReLU(inplace=True) ) @@ -125,7 +125,7 @@ def __init__( downsample: Optional[nn.Module] = None, ) -> None: - super(Bottleneck, self).__init__() + super().__init__() midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes) # 1x1x1 @@ -166,7 +166,7 @@ class BasicStem(nn.Sequential): """The default conv-batchnorm-relu stem""" def __init__(self) -> None: - super(BasicStem, self).__init__( + super().__init__( nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), padding=(1, 3, 3), bias=False), nn.BatchNorm3d(64), nn.ReLU(inplace=True), @@ -177,7 +177,7 @@ class R2Plus1dStem(nn.Sequential): """R(2+1)D stem is different than the default one as it uses separated 3D convolution""" def __init__(self) -> None: - 
super(R2Plus1dStem, self).__init__( + super().__init__( nn.Conv3d(3, 45, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False), nn.BatchNorm3d(45), nn.ReLU(inplace=True), @@ -208,7 +208,7 @@ def __init__( num_classes (int, optional): Dimension of the final FC layer. Defaults to 400. zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False. """ - super(VideoResNet, self).__init__() + super().__init__() _log_api_usage_once(self) self.inplanes = 64 diff --git a/torchvision/prototype/datasets/utils/_dataset.py b/torchvision/prototype/datasets/utils/_dataset.py index fa1b5e6478b..190e4f4cd76 100644 --- a/torchvision/prototype/datasets/utils/_dataset.py +++ b/torchvision/prototype/datasets/utils/_dataset.py @@ -70,7 +70,7 @@ def __init__( @staticmethod def read_categories_file(path: pathlib.Path) -> List[List[str]]: - with open(path, "r", newline="") as file: + with open(path, newline="") as file: return [row for row in csv.reader(file)] @property From 55e3477f1ffaaea43e807422c6c61a9ba0cde3a8 Mon Sep 17 00:00:00 2001 From: Jirka Date: Tue, 26 Oct 2021 22:15:06 +0200 Subject: [PATCH 14/17] fixing --- torchvision/transforms/_transforms_video.py | 5 +++-- torchvision/transforms/transforms.py | 14 ++++++-------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/torchvision/transforms/_transforms_video.py b/torchvision/transforms/_transforms_video.py index 629a42f4d21..440a75f286c 100644 --- a/torchvision/transforms/_transforms_video.py +++ b/torchvision/transforms/_transforms_video.py @@ -77,8 +77,9 @@ def __call__(self, clip): return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode) def __repr__(self): - return self.__class__.__name__ + "(size={}, interpolation_mode={}, scale={}, ratio={})".format( - self.size, self.interpolation_mode, self.scale, self.ratio + return ( + self.__class__.__name__ + + f"(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})" ) diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index 1f3d8e23ca7..47815c60b4e 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -338,10 +338,8 @@ def forward(self, img): return F.resize(img, self.size, self.interpolation, self.max_size, self.antialias) def __repr__(self): - interpolate_str = self.interpolation.value - return self.__class__.__name__ + "(size={}, interpolation={}, max_size={}, antialias={})".format( - self.size, interpolate_str, self.max_size, self.antialias - ) + detail = f"(size={self.size}, interpolation={self.interpolation.value}, max_size={self.max_size}, antialias={self.antialias})" + return self.__class__.__name__ + detail class Scale(Resize): @@ -434,7 +432,9 @@ def __init__(self, padding, fill=0, padding_mode="constant"): raise ValueError("Padding mode should be either constant, edge, reflect or symmetric") if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]: - raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple") + raise ValueError( + f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple" + ) self.padding = padding self.fill = fill @@ -451,9 +451,7 @@ def forward(self, img): return F.pad(img, self.padding, self.fill, self.padding_mode) def __repr__(self): - return self.__class__.__name__ + "(padding={}, fill={}, padding_mode={})".format( - self.padding, self.fill, self.padding_mode - ) + return 
self.__class__.__name__ + f"(padding={self.padding}, fill={self.fill}, padding_mode={self.padding_mode})" class Lambda: From 1da64c08385c89e5d930303a971063529a60e335 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 28 Oct 2021 10:55:37 +0100 Subject: [PATCH 15/17] Fix remaining f-strings --- .pre-commit-config.yaml | 12 ++++++------ gallery/plot_video_api.py | 2 +- references/classification/train.py | 6 +++--- references/classification/utils.py | 3 +-- references/detection/train.py | 3 +-- references/segmentation/train.py | 3 +-- references/video_classification/train.py | 3 +-- test/test_functional_tensor.py | 12 +++--------- test/test_models.py | 12 ++++++------ test/test_transforms.py | 8 ++------ torchvision/datasets/cifar.py | 3 ++- torchvision/datasets/cityscapes.py | 8 ++++---- torchvision/datasets/mnist.py | 3 ++- torchvision/datasets/phototour.py | 3 ++- torchvision/extension.py | 7 +++---- torchvision/io/_video_opt.py | 6 +----- torchvision/models/detection/backbone_utils.py | 2 +- torchvision/models/detection/generalized_rcnn.py | 8 +++----- torchvision/models/detection/retinanet.py | 2 +- torchvision/models/detection/ssd.py | 2 +- torchvision/models/detection/transform.py | 4 +--- torchvision/models/googlenet.py | 2 +- torchvision/models/mobilenetv2.py | 3 +-- torchvision/models/resnet.py | 2 +- torchvision/models/squeezenet.py | 2 +- torchvision/ops/deform_conv.py | 4 +--- torchvision/prototype/models/_api.py | 2 +- torchvision/transforms/functional.py | 3 +-- torchvision/transforms/transforms.py | 14 +++++--------- 29 files changed, 58 insertions(+), 86 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fcead114e19..920916030be 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -8,12 +8,12 @@ repos: exclude: packaging/.* - id: end-of-file-fixer - #- repo: https://github.com/asottile/pyupgrade - # rev: v2.29.0 - # hooks: - # - id: pyupgrade - # args: [--py36-plus] - # name: Upgrade code + # - repo: https://github.com/asottile/pyupgrade + # rev: v2.29.0 + # hooks: + # - id: pyupgrade + # args: [--py36-plus] + # name: Upgrade code - repo: https://github.com/omnilib/ufmt rev: v1.3.0 diff --git a/gallery/plot_video_api.py b/gallery/plot_video_api.py index fe296d67be0..5ab03af08a6 100644 --- a/gallery/plot_video_api.py +++ b/gallery/plot_video_api.py @@ -137,7 +137,7 @@ def example_read_video(video_object, start=0, end=None, read_video=True, read_au if end < start: raise ValueError( "end time should be larger than start time, got " - "start time={} and end time={}".format(start, end) + f"start time={start} and end time={end}" ) video_frames = torch.empty(0) diff --git a/references/classification/train.py b/references/classification/train.py index 27ef12a83a2..79ba21b263d 100644 --- a/references/classification/train.py +++ b/references/classification/train.py @@ -270,8 +270,8 @@ def main(args): main_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.lr_gamma) else: raise RuntimeError( - "Invalid lr scheduler '{}'. Only StepLR, CosineAnnealingLR and ExponentialLR " - "are supported.".format(args.lr_scheduler) + f"Invalid lr scheduler '{args.lr_scheduler}'. Only StepLR, CosineAnnealingLR and ExponentialLR " + "are supported." ) if args.lr_warmup_epochs > 0: @@ -285,7 +285,7 @@ def main(args): ) else: raise RuntimeError( - f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant " "are supported." 
+ f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant are supported." ) lr_scheduler = torch.optim.lr_scheduler.SequentialLR( optimizer, schedulers=[warmup_lr_scheduler, main_lr_scheduler], milestones=[args.lr_warmup_epochs] diff --git a/references/classification/utils.py b/references/classification/utils.py index cb12ad33366..562c16aac2c 100644 --- a/references/classification/utils.py +++ b/references/classification/utils.py @@ -307,8 +307,7 @@ def average_checkpoints(inputs): params_keys = model_params_keys elif params_keys != model_params_keys: raise KeyError( - "For checkpoint {}, expected list of params: {}, " - "but found: {}".format(f, params_keys, model_params_keys) + f"For checkpoint {f}, expected list of params: {params_keys}, " f"but found: {model_params_keys}" ) for k in params_keys: p = model_params[k] diff --git a/references/detection/train.py b/references/detection/train.py index 85837be4954..ce74ff22b30 100644 --- a/references/detection/train.py +++ b/references/detection/train.py @@ -197,8 +197,7 @@ def main(args): lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs) else: raise RuntimeError( - "Invalid lr scheduler '{}'. Only MultiStepLR and CosineAnnealingLR " - "are supported.".format(args.lr_scheduler) + f"Invalid lr scheduler '{args.lr_scheduler}'. Only MultiStepLR and CosineAnnealingLR are supported." ) if args.resume: diff --git a/references/segmentation/train.py b/references/segmentation/train.py index 47414136a9c..35f198c707a 100644 --- a/references/segmentation/train.py +++ b/references/segmentation/train.py @@ -152,8 +152,7 @@ def main(args): ) else: raise RuntimeError( - "Invalid warmup lr method '{}'. Only linear and constant " - "are supported.".format(args.lr_warmup_method) + f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant are supported." ) lr_scheduler = torch.optim.lr_scheduler.SequentialLR( optimizer, schedulers=[warmup_lr_scheduler, main_lr_scheduler], milestones=[warmup_iters] diff --git a/references/video_classification/train.py b/references/video_classification/train.py index 52b794e046b..968ea552dc7 100644 --- a/references/video_classification/train.py +++ b/references/video_classification/train.py @@ -232,8 +232,7 @@ def main(args): ) else: raise RuntimeError( - "Invalid warmup lr method '{}'. Only linear and constant " - "are supported.".format(args.lr_warmup_method) + f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant are supported." 
) lr_scheduler = torch.optim.lr_scheduler.SequentialLR( diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py index 7b3f2566ee5..24a7523b62a 100644 --- a/test/test_functional_tensor.py +++ b/test/test_functional_tensor.py @@ -225,9 +225,7 @@ def test_square_rotations(self, device, height, width, dt, angle, config, fn): num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0 ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2] # Tolerance : less than 6% of different pixels - assert ratio_diff_pixels < 0.06, "{}\n{} vs \n{}".format( - ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7] - ) + assert ratio_diff_pixels < 0.06 @pytest.mark.parametrize("device", cpu_and_gpu()) @pytest.mark.parametrize("height, width", [(32, 26)]) @@ -258,9 +256,7 @@ def test_rect_rotations(self, device, height, width, dt, angle, fn): num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0 ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2] # Tolerance : less than 3% of different pixels - assert ratio_diff_pixels < 0.03, "{}: {}\n{} vs \n{}".format( - angle, ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7] - ) + assert ratio_diff_pixels < 0.03 @pytest.mark.parametrize("device", cpu_and_gpu()) @pytest.mark.parametrize("height, width", [(26, 26), (32, 26)]) @@ -346,9 +342,7 @@ def test_all_ops(self, device, height, width, dt, a, t, s, sh, f, fn): ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2] # Tolerance : less than 5% (cpu), 6% (cuda) of different pixels tol = 0.06 if device == "cuda" else 0.05 - assert ratio_diff_pixels < tol, "{}: {}\n{} vs \n{}".format( - (NEAREST, a, t, s, sh, f), ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7] - ) + assert ratio_diff_pixels < tol @pytest.mark.parametrize("device", cpu_and_gpu()) @pytest.mark.parametrize("dt", ALL_DTYPES) diff --git a/test/test_models.py b/test/test_models.py index 077ed36c4ae..da70f57f902 100644 --- a/test/test_models.py +++ b/test/test_models.py @@ -99,12 +99,12 @@ def get_export_import_copy(m): if not TEST_WITH_SLOW or skip: # TorchScript is not enabled, skip these tests msg = ( - "The check_jit_scriptable test for {} was skipped. " + f"The check_jit_scriptable test for {nn_module.__class__.__name__} was skipped. " "This test checks if the module's results in TorchScript " "match eager and that it can be exported. To run these " "tests make sure you set the environment variable " "PYTORCH_TEST_WITH_SLOW=1 and that the test is not " - "manually skipped.".format(nn_module.__class__.__name__) + "manually skipped." ) warnings.warn(msg, RuntimeWarning) return None @@ -541,10 +541,10 @@ def check_out(out): if not full_validation: msg = ( - "The output of {} could only be partially validated. " + f"The output of {test_segmentation_model.__name__} could only be partially validated. " "This is likely due to unit-test flakiness, but you may " "want to do additional manual checks if you made " - "significant changes to the codebase.".format(test_segmentation_model.__name__) + "significant changes to the codebase." ) warnings.warn(msg, RuntimeWarning) pytest.skip(msg) @@ -638,10 +638,10 @@ def compute_mean_std(tensor): if not full_validation: msg = ( - "The output of {} could only be partially validated. " + f"The output of {test_detection_model.__name__} could only be partially validated. 
" "This is likely due to unit-test flakiness, but you may " "want to do additional manual checks if you made " - "significant changes to the codebase.".format(test_detection_model.__name__) + "significant changes to the codebase." ) warnings.warn(msg, RuntimeWarning) pytest.skip(msg) diff --git a/test/test_transforms.py b/test/test_transforms.py index c55194ded9a..cd9f37ba994 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -2121,12 +2121,8 @@ def test_random_affine(): for _ in range(100): angle, translations, scale, shear = t.get_params(t.degrees, t.translate, t.scale, t.shear, img_size=img.size) assert -10 < angle < 10 - assert -img.size[0] * 0.5 <= translations[0] <= img.size[0] * 0.5, "{} vs {}".format( - translations[0], img.size[0] * 0.5 - ) - assert -img.size[1] * 0.5 <= translations[1] <= img.size[1] * 0.5, "{} vs {}".format( - translations[1], img.size[1] * 0.5 - ) + assert -img.size[0] * 0.5 <= translations[0] <= img.size[0] * 0.5 + assert -img.size[1] * 0.5 <= translations[1] <= img.size[1] * 0.5 assert 0.7 < scale < 1.3 assert -10 < shear[0] < 10 assert -20 < shear[1] < 40 diff --git a/torchvision/datasets/cifar.py b/torchvision/datasets/cifar.py index 7f57e42bc19..90794911147 100644 --- a/torchvision/datasets/cifar.py +++ b/torchvision/datasets/cifar.py @@ -142,7 +142,8 @@ def download(self) -> None: download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5) def extra_repr(self) -> str: - return "Split: {}".format("Train" if self.train is True else "Test") + split = "Train" if self.train is True else "Test" + return f"Split: {split}" class CIFAR100(CIFAR10): diff --git a/torchvision/datasets/cityscapes.py b/torchvision/datasets/cityscapes.py index 6836797b6b3..b07c093e10c 100644 --- a/torchvision/datasets/cityscapes.py +++ b/torchvision/datasets/cityscapes.py @@ -139,14 +139,14 @@ def __init__( if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir): if split == "train_extra": - image_dir_zip = os.path.join(self.root, "leftImg8bit{}".format("_trainextra.zip")) + image_dir_zip = os.path.join(self.root, "leftImg8bit_trainextra.zip") else: - image_dir_zip = os.path.join(self.root, "leftImg8bit{}".format("_trainvaltest.zip")) + image_dir_zip = os.path.join(self.root, "leftImg8bit_trainvaltest.zip") if self.mode == "gtFine": - target_dir_zip = os.path.join(self.root, "{}{}".format(self.mode, "_trainvaltest.zip")) + target_dir_zip = os.path.join(self.root, f"{self.mode}_trainvaltest.zip") elif self.mode == "gtCoarse": - target_dir_zip = os.path.join(self.root, "{}{}".format(self.mode, ".zip")) + target_dir_zip = os.path.join(self.root, f"{self.mode}.zip") if os.path.isfile(image_dir_zip) and os.path.isfile(target_dir_zip): extract_archive(from_path=image_dir_zip, to_path=self.root) diff --git a/torchvision/datasets/mnist.py b/torchvision/datasets/mnist.py index 4a1f1f4a0c2..660de3d420f 100644 --- a/torchvision/datasets/mnist.py +++ b/torchvision/datasets/mnist.py @@ -195,7 +195,8 @@ def download(self) -> None: raise RuntimeError(f"Error downloading {filename}") def extra_repr(self) -> str: - return "Split: {}".format("Train" if self.train is True else "Test") + split = "Train" if self.train is True else "Test" + return f"Split: {split}" class FashionMNIST(MNIST): diff --git a/torchvision/datasets/phototour.py b/torchvision/datasets/phototour.py index da1b0a9716d..edf1d2ee256 100644 --- a/torchvision/datasets/phototour.py +++ b/torchvision/datasets/phototour.py @@ -174,7 +174,8 @@ def cache(self) -> None: 
torch.save(dataset, f) def extra_repr(self) -> str: - return "Split: {}".format("Train" if self.train is True else "Test") + split = "Train" if self.train is True else "Test" + return f"Split: {split}" def read_image_file(data_dir: str, image_ext: str, n: int) -> torch.Tensor: diff --git a/torchvision/extension.py b/torchvision/extension.py index b59bed94dff..69f34891837 100644 --- a/torchvision/extension.py +++ b/torchvision/extension.py @@ -60,10 +60,9 @@ def _check_cuda_version(): if t_major != tv_major or t_minor != tv_minor: raise RuntimeError( "Detected that PyTorch and torchvision were compiled with different CUDA versions. " - "PyTorch has CUDA Version={}.{} and torchvision has CUDA Version={}.{}. " - "Please reinstall the torchvision that matches your PyTorch install.".format( - t_major, t_minor, tv_major, tv_minor - ) + f"PyTorch has CUDA Version={t_major}.{t_minor} and torchvision has " + f"CUDA Version={tv_major}.{tv_minor}. " + "Please reinstall the torchvision that matches your PyTorch install." ) return _version diff --git a/torchvision/io/_video_opt.py b/torchvision/io/_video_opt.py index 560491e60c5..b1eb5bd8055 100644 --- a/torchvision/io/_video_opt.py +++ b/torchvision/io/_video_opt.py @@ -73,11 +73,7 @@ def _validate_pts(pts_range): if pts_range[1] > 0: assert ( pts_range[0] <= pts_range[1] - ), """Start pts should not be smaller than end pts, got - start pts: {:d} and end pts: {:d}""".format( - pts_range[0], - pts_range[1], - ) + ), f"Start pts should not be smaller than end pts, got start pts: {pts_range[0]:d} and end pts: {pts_range[1]:d}" def _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration): diff --git a/torchvision/models/detection/backbone_utils.py b/torchvision/models/detection/backbone_utils.py index e12ee03c796..54fdc4c05c9 100644 --- a/torchvision/models/detection/backbone_utils.py +++ b/torchvision/models/detection/backbone_utils.py @@ -145,7 +145,7 @@ def _validate_trainable_layers( warnings.warn( "Changing trainable_backbone_layers has not effect if " "neither pretrained nor pretrained_backbone have been set to True, " - "falling back to trainable_backbone_layers={} so that all layers are trainable".format(max_value) + f"falling back to trainable_backbone_layers={max_value} so that all layers are trainable" ) trainable_backbone_layers = max_value diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index c9f552c126a..bd4ff74cea0 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -65,11 +65,9 @@ def forward(self, images, targets=None): boxes = target["boxes"] if isinstance(boxes, torch.Tensor): if len(boxes.shape) != 2 or boxes.shape[-1] != 4: - raise ValueError( - "Expected target boxes to be a tensor" "of shape [N, 4], got {:}.".format(boxes.shape) - ) + raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.") else: - raise ValueError("Expected target boxes to be of type " "Tensor, got {:}.".format(type(boxes))) + raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.") original_image_sizes: List[Tuple[int, int]] = [] for img in images: @@ -91,7 +89,7 @@ def forward(self, images, targets=None): degen_bb: List[float] = boxes[bb_idx].tolist() raise ValueError( "All bounding boxes should have positive height and width." 
- " Found invalid box {} for target at index {}.".format(degen_bb, target_idx) + f" Found invalid box {degen_bb} for target at index {target_idx}." ) features = self.backbone(images.tensors) diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index c27bee64be4..4b1e2a2c774 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -519,7 +519,7 @@ def forward(self, images, targets=None): degen_bb: List[float] = boxes[bb_idx].tolist() raise ValueError( "All bounding boxes should have positive height and width." - " Found invalid box {} for target at index {}.".format(degen_bb, target_idx) + f" Found invalid box {degen_bb} for target at index {target_idx}." ) # get the features from the backbone diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 7434dfe1531..be30bb54c40 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -337,7 +337,7 @@ def forward( degen_bb: List[float] = boxes[bb_idx].tolist() raise ValueError( "All bounding boxes should have positive height and width." - " Found invalid box {} for target at index {}.".format(degen_bb, target_idx) + f" Found invalid box {degen_bb} for target at index {target_idx}." ) # get the features from the backbone diff --git a/torchvision/models/detection/transform.py b/torchvision/models/detection/transform.py index 53b276887cf..90d19cfc8de 100644 --- a/torchvision/models/detection/transform.py +++ b/torchvision/models/detection/transform.py @@ -263,9 +263,7 @@ def __repr__(self) -> str: format_string = self.__class__.__name__ + "(" _indent = "\n " format_string += f"{_indent}Normalize(mean={self.image_mean}, std={self.image_std})" - format_string += "{}Resize(min_size={}, max_size={}, mode='bilinear')".format( - _indent, self.min_size, self.max_size - ) + format_string += f"{_indent}Resize(min_size={self.min_size}, max_size={self.max_size}, mode='bilinear')" format_string += "\n)" return format_string diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py index f9d255aa655..bcc43095315 100644 --- a/torchvision/models/googlenet.py +++ b/torchvision/models/googlenet.py @@ -45,7 +45,7 @@ def googlenet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> kwargs["aux_logits"] = False if kwargs["aux_logits"]: warnings.warn( - "auxiliary heads in the pretrained googlenet model are NOT pretrained, " "so make sure to train them" + "auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them" ) original_aux_logits = kwargs["aux_logits"] kwargs["aux_logits"] = True diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py index 68ff85e3cf6..4309ed00df6 100644 --- a/torchvision/models/mobilenetv2.py +++ b/torchvision/models/mobilenetv2.py @@ -137,8 +137,7 @@ def __init__( # only check the first element, assuming user knows t,c,n,s are required if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: raise ValueError( - "inverted_residual_setting should be non-empty " - "or a 4-element list, got {}".format(inverted_residual_setting) + "inverted_residual_setting should be non-empty " f"or a 4-element list, got {inverted_residual_setting}" ) # building first layer diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py index 708f250be13..b0bb8d13ade 100644 --- a/torchvision/models/resnet.py +++ b/torchvision/models/resnet.py @@ -188,7 +188,7 @@ def 
__init__( if len(replace_stride_with_dilation) != 3: raise ValueError( "replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation) + f"or a 3-element tuple, got {replace_stride_with_dilation}" ) self.groups = groups self.base_width = width_per_group diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py index 029593af5ad..2c1a30f225d 100644 --- a/torchvision/models/squeezenet.py +++ b/torchvision/models/squeezenet.py @@ -74,7 +74,7 @@ def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float # FIXME: Is this needed? SqueezeNet should only be called from the # FIXME: squeezenet1_x() functions # FIXME: This checking is not done for the other models - raise ValueError("Unsupported SqueezeNet version {version}:" "1_0 or 1_1 expected".format(version=version)) + raise ValueError(f"Unsupported SqueezeNet version {version}: 1_0 or 1_1 expected") # Final convolution is initialized differently from the rest final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1) diff --git a/torchvision/ops/deform_conv.py b/torchvision/ops/deform_conv.py index cd2225272d5..1f7c3ce40fe 100644 --- a/torchvision/ops/deform_conv.py +++ b/torchvision/ops/deform_conv.py @@ -83,9 +83,7 @@ def deform_conv2d( raise RuntimeError( "the shape of the offset tensor at dimension 1 is not valid. It should " "be a multiple of 2 * weight.size[2] * weight.size[3].\n" - "Got offset.shape[1]={}, while 2 * weight.size[2] * weight.size[3]={}".format( - offset.shape[1], 2 * weights_h * weights_w - ) + f"Got offset.shape[1]={offset.shape[1]}, while 2 * weight.size[2] * weight.size[3]={2 * weights_h * weights_w}" ) return torch.ops.torchvision.deform_conv2d( diff --git a/torchvision/prototype/models/_api.py b/torchvision/prototype/models/_api.py index 9c85402df64..3e436a2877c 100644 --- a/torchvision/prototype/models/_api.py +++ b/torchvision/prototype/models/_api.py @@ -106,7 +106,7 @@ def get_weight(fn: Callable, weight_name: str) -> Weights: if weights_class is None: raise ValueError( - "The weight class for the specific method couldn't be retrieved. Make sure the typing info is " "correct." + "The weight class for the specific method couldn't be retrieved. Make sure the typing info is correct." ) return weights_class.from_str(weight_name) diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index 37c7e99efdd..0912e48cf15 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -330,8 +330,7 @@ def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool if tensor.ndim < 3: raise ValueError( - "Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = " - "{}.".format(tensor.size()) + "Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = " f"{tensor.size()}" ) if not inplace: diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index f20ca563b18..a409ff3cbb8 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -1084,22 +1084,18 @@ def __init__(self, transformation_matrix, mean_vector): if transformation_matrix.size(0) != transformation_matrix.size(1): raise ValueError( "transformation_matrix should be square. Got " - + "[{} x {}] rectangular matrix.".format(*transformation_matrix.size()) + f"{tuple(transformation_matrix.size())} rectangular matrix." 
) if mean_vector.size(0) != transformation_matrix.size(0): raise ValueError( f"mean_vector should have the same length {mean_vector.size(0)}" - + " as any one of the dimensions of the transformation_matrix [{}]".format( - tuple(transformation_matrix.size()) - ) + f" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]" ) if transformation_matrix.device != mean_vector.device: raise ValueError( - "Input tensors should be on the same device. Got {} and {}".format( - transformation_matrix.device, mean_vector.device - ) + f"Input tensors should be on the same device. Got {transformation_matrix.device} and {mean_vector.device}" ) self.transformation_matrix = transformation_matrix @@ -1125,7 +1121,7 @@ def forward(self, tensor: Tensor) -> Tensor: if tensor.device.type != self.mean_vector.device.type: raise ValueError( "Input tensor should be on the same device as transformation matrix and mean vector. " - "Got {} vs {}".format(tensor.device, self.mean_vector.device) + f"Got {tensor.device} vs {self.mean_vector.device}" ) flat_tensor = tensor.view(-1, n) - self.mean_vector @@ -1720,7 +1716,7 @@ def forward(self, img): if value is not None and not (len(value) in (1, img.shape[-3])): raise ValueError( "If value is a sequence, it should have either a single value or " - "{} (number of input channels)".format(img.shape[-3]) + f"{img.shape[-3]} (number of input channels)" ) x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value) From 331ac7d4fecd279096039a55c58ed25f47682bd1 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 28 Oct 2021 11:15:48 +0100 Subject: [PATCH 16/17] Some more weird stuff --- references/classification/utils.py | 2 +- torchvision/models/mobilenetv2.py | 2 +- torchvision/prototype/datasets/utils/_internal.py | 2 +- torchvision/prototype/models/_api.py | 2 +- torchvision/transforms/functional.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/references/classification/utils.py b/references/classification/utils.py index 562c16aac2c..473684fe162 100644 --- a/references/classification/utils.py +++ b/references/classification/utils.py @@ -307,7 +307,7 @@ def average_checkpoints(inputs): params_keys = model_params_keys elif params_keys != model_params_keys: raise KeyError( - f"For checkpoint {f}, expected list of params: {params_keys}, " f"but found: {model_params_keys}" + f"For checkpoint {f}, expected list of params: {params_keys}, but found: {model_params_keys}" ) for k in params_keys: p = model_params[k] diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py index 4309ed00df6..1a470953df5 100644 --- a/torchvision/models/mobilenetv2.py +++ b/torchvision/models/mobilenetv2.py @@ -137,7 +137,7 @@ def __init__( # only check the first element, assuming user knows t,c,n,s are required if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: raise ValueError( - "inverted_residual_setting should be non-empty " f"or a 4-element list, got {inverted_residual_setting}" + f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}" ) # building first layer diff --git a/torchvision/prototype/datasets/utils/_internal.py b/torchvision/prototype/datasets/utils/_internal.py index fdb6a17c46f..7cef15f9513 100644 --- a/torchvision/prototype/datasets/utils/_internal.py +++ b/torchvision/prototype/datasets/utils/_internal.py @@ -63,7 +63,7 @@ def sequence_to_str(seq: Sequence, separate_last: str = "") -> str: if 
len(seq) == 1: return f"'{seq[0]}'" - return f"""'{"', '".join([str(item) for item in seq[:-1]])}', """ f"""{separate_last}'{seq[-1]}'.""" + return f"""'{"', '".join([str(item) for item in seq[:-1]])}', {separate_last}'{seq[-1]}'.""" def add_suggestion( diff --git a/torchvision/prototype/models/_api.py b/torchvision/prototype/models/_api.py index 3e436a2877c..4961d7def50 100644 --- a/torchvision/prototype/models/_api.py +++ b/torchvision/prototype/models/_api.py @@ -52,7 +52,7 @@ def verify(cls, obj: Any) -> Any: obj = cls.from_str(obj) elif not isinstance(obj, cls) and not isinstance(obj, WeightEntry): raise TypeError( - f"Invalid Weight class provided; expected {cls.__name__} " f"but received {obj.__class__.__name__}." + f"Invalid Weight class provided; expected {cls.__name__} but received {obj.__class__.__name__}." ) return obj diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index 0912e48cf15..bd5b170626e 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -330,7 +330,7 @@ def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool if tensor.ndim < 3: raise ValueError( - "Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = " f"{tensor.size()}" + f"Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = {tensor.size()}" ) if not inplace: From fc9aa7ffa80cde539d226132adab1d7f55d346c3 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 28 Oct 2021 11:52:12 +0100 Subject: [PATCH 17/17] revert some changes to torchvision/io/_video_opt.py - jit error --- torchvision/io/_video_opt.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/torchvision/io/_video_opt.py b/torchvision/io/_video_opt.py index b1eb5bd8055..630cbe07781 100644 --- a/torchvision/io/_video_opt.py +++ b/torchvision/io/_video_opt.py @@ -73,7 +73,11 @@ def _validate_pts(pts_range): if pts_range[1] > 0: assert ( pts_range[0] <= pts_range[1] - ), f"Start pts should not be smaller than end pts, got start pts: {pts_range[0]:d} and end pts: {pts_range[1]:d}" + ), """Start pts should not be smaller than end pts, got + start pts: {0:d} and end pts: {1:d}""".format( + pts_range[0], + pts_range[1], + ) def _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration):
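Note (not part of the patch series): the final revert keeps str.format() in _video_opt.py, presumably because that module is compiled with torch.jit.script and the f-string produced by pyupgrade is what triggered the reported JIT error. A minimal sketch of why the restored form is safe; the function name _check_pts is hypothetical and only illustrates that str.format() remains scriptable:

import torch

@torch.jit.script
def _check_pts(start: int, end: int) -> None:
    # Hypothetical helper: the str.format() message (the style restored by the
    # revert) compiles under TorchScript and runs without error.
    assert start <= end, "Start pts should not be smaller than end pts, got start pts: {} and end pts: {}".format(
        start, end
    )

_check_pts(0, 10)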