
Commit 3d60f49

[*.py] Rename "Arguments:" to "Args:" (#3203)
Co-authored-by: Vasilis Vryniotis <[email protected]>
1 parent ca6fdd6 commit 3d60f49

Showing 28 changed files with 73 additions and 73 deletions.
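Every hunk below applies the same mechanical rename, bringing the docstrings in line with the Google Python style guide, which heads the parameter section with "Args:". A minimal before/after sketch of the pattern (the `area` function is invented for illustration, not taken from the diff):

# before this commit
def area(w, h):
    """Compute a rectangle's area.

    Arguments:
        w (int): width
        h (int): height
    """
    return w * h

# after this commit: the Google-style section header
def area(w, h):
    """Compute a rectangle's area.

    Args:
        w (int): width
        h (int): height
    """
    return w * h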

references/detection/group_by_aspect_ratio.py

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@ class GroupedBatchSampler(BatchSampler):
     It enforces that the batch only contain elements from the same group.
     It also tries to provide mini-batches which follows an ordering which is
     as close as possible to the ordering from the original sampler.
-    Arguments:
+    Args:
         sampler (Sampler): Base sampler.
         group_ids (list[int]): If the sampler produces indices in range [0, N),
             `group_ids` must be a list of `N` ints which contains the group id of each sample.
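As a usage sketch (the import path assumes the references/detection scripts are on sys.path; the stand-in dataset and group ids are made up for illustration):

import torch
from torch.utils.data.sampler import RandomSampler
from group_by_aspect_ratio import GroupedBatchSampler  # references/detection module

dataset = [torch.zeros(3, 32, 32) for _ in range(10)]  # stand-in dataset
group_ids = [0] * 5 + [1] * 5  # pretend: 5 portrait, 5 landscape images
batch_sampler = GroupedBatchSampler(RandomSampler(dataset), group_ids, batch_size=2)
for indices in batch_sampler:
    print(indices)  # the indices in each batch share a single group id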

torchvision/datasets/samplers/clip_sampler.py

Lines changed: 2 additions & 2 deletions
@@ -111,7 +111,7 @@ class UniformClipSampler(Sampler):
     When number of unique clips in the video is fewer than num_video_clips_per_video,
     repeat the clips until `num_video_clips_per_video` clips are collected

-    Arguments:
+    Args:
         video_clips (VideoClips): video clips to sample from
         num_clips_per_video (int): number of clips to be sampled per video
     """
@@ -151,7 +151,7 @@ class RandomClipSampler(Sampler):
     """
     Samples at most `max_video_clips_per_video` clips for each video randomly

-    Arguments:
+    Args:
         video_clips (VideoClips): video clips to sample from
         max_clips_per_video (int): maximum number of clips to be sampled per video
     """

torchvision/datasets/video_utils.py

Lines changed: 3 additions & 3 deletions
@@ -88,7 +88,7 @@ class VideoClips(object):
     Recreating the clips for different clip lengths is fast, and can be done
     with the `compute_clips` method.

-    Arguments:
+    Args:
         video_paths (List[str]): paths to the video files
         clip_length_in_frames (int): size of a clip in number of frames
         frames_between_clips (int): step (in frames) between each clip
@@ -227,7 +227,7 @@ def compute_clips(self, num_frames, step, frame_rate=None):
        Always returns clips of size `num_frames`, meaning that the
        last few frames in a video can potentially be dropped.

-        Arguments:
+        Args:
            num_frames (int): number of frames for the clip
            step (int): distance between two clips
        """
@@ -285,7 +285,7 @@ def get_clip(self, idx):
        """
        Gets a subclip from a list of videos.

-        Arguments:
+        Args:
            idx (int): index of the subclip. Must be between 0 and num_clips().

        Returns:
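A short sketch of the workflow the docstring describes, i.e. re-slicing the same videos into different clip lengths without re-reading their metadata (assumes video.mp4 exists):

from torchvision.datasets.video_utils import VideoClips

video_clips = VideoClips(["video.mp4"], clip_length_in_frames=32, frames_between_clips=32)
# cheap: reuses the cached per-video timestamps
video_clips.compute_clips(num_frames=16, step=8)
video, audio, info, video_idx = video_clips.get_clip(0)
print(video.shape)  # (T, H, W, C) uint8 tensor with T == 16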

torchvision/io/image.py

Lines changed: 4 additions & 4 deletions
@@ -71,7 +71,7 @@ def read_file(path: str) -> torch.Tensor:
    Reads and outputs the bytes contents of a file as a uint8 Tensor
    with one dimension.

-    Arguments:
+    Args:
        path (str): the path to the file to be read

    Returns:
@@ -86,7 +86,7 @@ def write_file(filename: str, data: torch.Tensor) -> None:
    Writes the contents of a uint8 tensor with one dimension to a
    file.

-    Arguments:
+    Args:
        filename (str): the path to the file to be written
        data (Tensor): the contents to be written to the output file
    """
@@ -99,7 +99,7 @@ def decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGE
    Optionally converts the image to the desired format.
    The values of the output tensor are uint8 between 0 and 255.

-    Arguments:
+    Args:
        input (Tensor[1]): a one dimensional uint8 tensor containing
            the raw bytes of the PNG image.
        mode (ImageReadMode): the read mode used for optionally
@@ -162,7 +162,7 @@ def decode_jpeg(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANG
    Optionally converts the image to the desired format.
    The values of the output tensor are uint8 between 0 and 255.

-    Arguments:
+    Args:
        input (Tensor[1]): a one dimensional uint8 tensor containing
            the raw bytes of the JPEG image.
        mode (ImageReadMode): the read mode used for optionally
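A minimal round trip through the functions touched here (assumes img.jpg exists in the working directory):

from torchvision.io.image import read_file, write_file, decode_jpeg

data = read_file("img.jpg")    # one-dimensional uint8 tensor of raw bytes
img = decode_jpeg(data)        # uint8 image tensor, values in [0, 255]
print(img.shape, img.dtype)
write_file("copy.jpg", data)   # writes the raw bytes back out unchanged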

torchvision/models/_utils.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ class IntermediateLayerGetter(nn.ModuleDict):
    assigned to the model. So if `model` is passed, `model.feature1` can
    be returned, but not `model.feature1.layer2`.

-    Arguments:
+    Args:
        model (nn.Module): model on which we will extract the features
        return_layers (Dict[name, new_name]): a dict containing the names
            of the modules for which the activations will be returned as
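For context, the class's own docstring carries an example along these lines:

import torch
import torchvision
from torchvision.models._utils import IntermediateLayerGetter

m = torchvision.models.resnet18()
# return the layer1 and layer3 activations under new names
getter = IntermediateLayerGetter(m, return_layers={"layer1": "feat1", "layer3": "feat2"})
out = getter(torch.rand(1, 3, 224, 224))
print([(k, v.shape) for k, v in out.items()])
# [('feat1', torch.Size([1, 64, 56, 56])), ('feat2', torch.Size([1, 256, 14, 14]))]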

torchvision/models/detection/_utils.py

Lines changed: 7 additions & 7 deletions
@@ -15,7 +15,7 @@ class BalancedPositiveNegativeSampler(object):
    def __init__(self, batch_size_per_image, positive_fraction):
        # type: (int, float) -> None
        """
-        Arguments:
+        Args:
            batch_size_per_image (int): number of elements to be selected per image
            positive_fraction (float): percentace of positive elements per batch
        """
@@ -25,7 +25,7 @@ def __init__(self, batch_size_per_image, positive_fraction):
    def __call__(self, matched_idxs):
        # type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
        """
-        Arguments:
+        Args:
            matched idxs: list of tensors containing -1, 0 or positive values.
                Each tensor corresponds to a specific image.
                -1 values are ignored, 0 are considered as negatives and > 0 as
@@ -83,7 +83,7 @@ def encode_boxes(reference_boxes, proposals, weights):
    Encode a set of proposals with respect to some
    reference boxes

-    Arguments:
+    Args:
        reference_boxes (Tensor): reference boxes
        proposals (Tensor): boxes to be encoded
    """
@@ -133,7 +133,7 @@ class BoxCoder(object):
    def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
        # type: (Tuple[float, float, float, float], float) -> None
        """
-        Arguments:
+        Args:
            weights (4-element tuple)
            bbox_xform_clip (float)
        """
@@ -153,7 +153,7 @@ def encode_single(self, reference_boxes, proposals):
        Encode a set of proposals with respect to some
        reference boxes

-        Arguments:
+        Args:
            reference_boxes (Tensor): reference boxes
            proposals (Tensor): boxes to be encoded
        """
@@ -183,7 +183,7 @@ def decode_single(self, rel_codes, boxes):
        From a set of original boxes and encoded relative box offsets,
        get the decoded boxes.

-        Arguments:
+        Args:
            rel_codes (Tensor): encoded boxes
            boxes (Tensor): reference boxes.
        """
@@ -361,7 +361,7 @@ def overwrite_eps(model, eps):
    only when the pretrained weights are loaded to maintain compatibility
    with previous versions.

-    Arguments:
+    Args:
        model (nn.Module): The model on which we perform the overwrite.
        eps (float): The new value of eps.
    """

torchvision/models/detection/anchor_utils.py

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ class AnchorGenerator(nn.Module):
    and AnchorGenerator will output a set of sizes[i] * aspect_ratios[i] anchors
    per spatial location for feature map i.

-    Arguments:
+    Args:
        sizes (Tuple[Tuple[int]]):
        aspect_ratios (Tuple[Tuple[float]]):
    """

torchvision/models/detection/backbone_utils.py

Lines changed: 2 additions & 2 deletions
@@ -14,7 +14,7 @@ class BackboneWithFPN(nn.Module):
    Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
    extract a submodel that returns the feature maps specified in return_layers.
    The same limitations of IntermediatLayerGetter apply here.
-    Arguments:
+    Args:
        backbone (nn.Module)
        return_layers (Dict[name, new_name]): a dict containing the names
            of the modules for which the activations will be returned as
@@ -73,7 +73,7 @@ def resnet_fpn_backbone(
        >>> ('3', torch.Size([1, 256, 2, 2])),
        >>> ('pool', torch.Size([1, 256, 1, 1]))]

-    Arguments:
+    Args:
        backbone_name (string): resnet architecture. Possible values are 'ResNet', 'resnet18', 'resnet34', 'resnet50',
            'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'
        norm_layer (torchvision.ops): it is recommended to use the default value. For details visit:
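The surrounding docstring's own example boils down to this (the named outputs are the FPN's 256-channel feature maps):

import torch
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone

backbone = resnet_fpn_backbone('resnet50', pretrained=False)
out = backbone(torch.rand(1, 3, 64, 64))
print([(name, feat.shape) for name, feat in out.items()])  # '0'..'3' plus 'pool'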

torchvision/models/detection/faster_rcnn.py

Lines changed: 4 additions & 4 deletions
@@ -49,7 +49,7 @@ class FasterRCNN(GeneralizedRCNN):
        - labels (Int64Tensor[N]): the predicted labels for each image
        - scores (Tensor[N]): the scores or each prediction

-    Arguments:
+    Args:
        backbone (nn.Module): the network used to compute the features for the model.
            It should contain a out_channels attribute, which indicates the number of output
            channels that each feature map has (and it should be the same for all feature maps).
@@ -239,7 +239,7 @@ class TwoMLPHead(nn.Module):
    """
    Standard heads for FPN-based models

-    Arguments:
+    Args:
        in_channels (int): number of input channels
        representation_size (int): size of the intermediate representation
    """
@@ -264,7 +264,7 @@ class FastRCNNPredictor(nn.Module):
    Standard classification + bounding box regression layers
    for Fast R-CNN.

-    Arguments:
+    Args:
        in_channels (int): number of input channels
        num_classes (int): number of output classes (including background)
    """
@@ -341,7 +341,7 @@ def fasterrcnn_resnet50_fpn(pretrained=False, progress=True,
        >>> # optionally, if you want to export the model to ONNX:
        >>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11)

-    Arguments:
+    Args:
        pretrained (bool): If True, returns a model pre-trained on COCO train2017
        progress (bool): If True, displays a progress bar of the download to stderr
        pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
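For inference, the function's docstring example reduces to:

import torch
import torchvision

model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()
predictions = model([torch.rand(3, 300, 400), torch.rand(3, 500, 400)])
print(predictions[0]["boxes"].shape)  # boxes, labels and scores per image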

torchvision/models/detection/generalized_rcnn.py

Lines changed: 2 additions & 2 deletions
@@ -14,7 +14,7 @@ class GeneralizedRCNN(nn.Module):
    """
    Main class for Generalized R-CNN.

-    Arguments:
+    Args:
        backbone (nn.Module):
        rpn (nn.Module):
        roi_heads (nn.Module): takes the features + the proposals from the RPN and computes
@@ -43,7 +43,7 @@ def eager_outputs(self, losses, detections):
    def forward(self, images, targets=None):
        # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
        """
-        Arguments:
+        Args:
            images (list[Tensor]): images to be processed
            targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)
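In training mode the `targets` argument is required; a hedged sketch of the expected format (a single image with one box of class 1, illustrated through the Faster R-CNN subclass):

import torch
import torchvision

model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
    num_classes=2, pretrained_backbone=False)
images = [torch.rand(3, 300, 400)]
targets = [{
    "boxes": torch.tensor([[50.0, 60.0, 150.0, 200.0]]),  # (x1, y1, x2, y2)
    "labels": torch.tensor([1], dtype=torch.int64),
}]
model.train()
loss_dict = model(images, targets)  # returns the dict of training losses
print(sorted(loss_dict.keys()))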
