ignore jit warning 'globally' #6833

Merged · 4 commits · Oct 26, 2022
29 changes: 9 additions & 20 deletions test/prototype_transforms_kernel_infos.py
@@ -2,7 +2,6 @@
 import functools
 import itertools
 import math
-import re

 import numpy as np
 import pytest
@@ -159,12 +158,6 @@ def sample_inputs_horizontal_flip_video():
         KernelInfo(
             F.horizontal_flip_bounding_box,
             sample_inputs_fn=sample_inputs_horizontal_flip_bounding_box,
-            test_marks=[
-                TestMark(
-                    ("TestKernels", "test_scripted_vs_eager"),
-                    pytest.mark.filterwarnings(f"ignore:{re.escape('operator() profile_node %72')}:UserWarning"),
-                )
-            ],
         ),
         KernelInfo(
             F.horizontal_flip_mask,
@@ -2045,17 +2038,11 @@ def sample_inputs_convert_dtype_video():
     yield ArgsKwargs(video_loader)


-_common_convert_dtype_marks = [
-    TestMark(
-        ("TestKernels", "test_dtype_and_device_consistency"),
-        pytest.mark.skip(reason="`convert_dtype_*` kernels convert the dtype by design"),
-        condition=lambda args_kwargs: args_kwargs.args[0].dtype != args_kwargs.kwargs.get("dtype", torch.float32),
-    ),
-    TestMark(
-        ("TestKernels", "test_scripted_vs_eager"),
-        pytest.mark.filterwarnings(f"ignore:{re.escape('operator() profile_node %')}:UserWarning"),
-    ),
-]
+skip_dtype_consistency = TestMark(
+    ("TestKernels", "test_dtype_and_device_consistency"),
+    pytest.mark.skip(reason="`convert_dtype_*` kernels convert the dtype by design"),
+    condition=lambda args_kwargs: args_kwargs.args[0].dtype != args_kwargs.kwargs.get("dtype", torch.float32),
+)

 KERNEL_INFOS.extend(
     [
@@ -2065,7 +2052,7 @@ def sample_inputs_convert_dtype_video():
             reference_fn=reference_convert_dtype_image_tensor,
             reference_inputs_fn=reference_inputs_convert_dtype_image_tensor,
             test_marks=[
-                *_common_convert_dtype_marks,
+                skip_dtype_consistency,
                 TestMark(
                     ("TestKernels", "test_against_reference"),
                     pytest.mark.xfail(reason="Conversion overflows"),
@@ -2083,7 +2070,9 @@ def sample_inputs_convert_dtype_video():
         KernelInfo(
             F.convert_dtype_video,
             sample_inputs_fn=sample_inputs_convert_dtype_video,
-            test_marks=_common_convert_dtype_marks,
+            test_marks=[
+                skip_dtype_consistency,
+            ],
         ),
     ]
 )
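The `condition` lambda on `skip_dtype_consistency` above is what scopes the skip: the consistency test is only skipped for samples whose dtype actually changes. A minimal sketch of that predicate, using a hypothetical `ArgsKwargs` stand-in rather than the suite's real container:

from dataclasses import dataclass, field

import torch

# Hypothetical stand-in for the test suite's ArgsKwargs container, just to
# show when the skip condition fires.
@dataclass
class ArgsKwargs:
    args: tuple
    kwargs: dict = field(default_factory=dict)

condition = lambda args_kwargs: args_kwargs.args[0].dtype != args_kwargs.kwargs.get("dtype", torch.float32)

# uint8 -> float32 actually converts, so the consistency test is skipped
print(condition(ArgsKwargs((torch.zeros(2, dtype=torch.uint8),), {"dtype": torch.float32})))  # True
# float32 -> float32 is a no-op, so the consistency test still runs
print(condition(ArgsKwargs((torch.zeros(2, dtype=torch.float32),))))  # False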
13 changes: 13 additions & 0 deletions test/test_prototype_transforms_functional.py
@@ -1,5 +1,6 @@
 import math
 import os
+import re

 import numpy as np
 import PIL.Image
@@ -26,6 +27,15 @@ def script(fn):
         raise AssertionError(f"Trying to `torch.jit.script` '{fn.__name__}' raised the error above.") from error


+# Scripting a function often triggers a warning like
+# `UserWarning: operator() profile_node %$INT1 : int[] = prim::profile_ivalue($INT2) does not have profile information`
+# with varying `INT1` and `INT2`. Since these are uninteresting for us and only clutter the test summary, we ignore
+# them.
+ignore_jit_warning_no_profile = pytest.mark.filterwarnings(
+    f"ignore:{re.escape('operator() profile_node %')}:UserWarning"
+)
+
+
 def make_info_args_kwargs_params(info, *, args_kwargs_fn, test_id=None):
     args_kwargs = list(args_kwargs_fn(info))
     idx_field_len = len(str(len(args_kwargs)))
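A note on the `re.escape` in the mark above: the message field of a warnings filter is a regular expression matched against the start of the warning text, and the literal prefix `operator() profile_node %` contains regex metacharacters. A small stdlib-only sketch (pytest's `filterwarnings` mark uses the same filter syntax) showing the escaped pattern doing its job:

import re
import warnings

prefix = "operator() profile_node %"

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # `message` is a regex matched against the beginning of the warning text,
    # so re.escape makes the parentheses literal; unescaped, `()` would be an
    # empty group and the pattern would not match the actual message.
    warnings.filterwarnings("ignore", message=re.escape(prefix), category=UserWarning)
    warnings.warn(prefix + "72 : int[] = prim::profile_ivalue(%74) does not have profile information", UserWarning)

assert not caught  # the warning was filtered out, nothing was recorded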
@@ -87,6 +97,7 @@ class TestKernels:
         condition=lambda info: info.reference_fn is not None,
     )

+    @ignore_jit_warning_no_profile
     @sample_inputs
     @pytest.mark.parametrize("device", cpu_and_gpu())
     def test_scripted_vs_eager(self, info, args_kwargs, device):
@@ -218,6 +229,7 @@ class TestDispatchers:
         condition=lambda info: features.Image in info.kernels,
     )

+    @ignore_jit_warning_no_profile
     @image_sample_inputs
     @pytest.mark.parametrize("device", cpu_and_gpu())
     def test_scripted_smoke(self, info, args_kwargs, device):
@@ -230,6 +242,7 @@ def test_scripted_smoke(self, info, args_kwargs, device):

     # TODO: We need this until the dispatchers below also have `DispatcherInfo`'s. If they do, `test_scripted_smoke`
     # replaces this test for them.
+    @ignore_jit_warning_no_profile
     @pytest.mark.parametrize(
         "dispatcher",
         [
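To round this off, a hedged sketch of the reuse pattern the diff above introduces: define the mark once at module level and decorate each scripting test. The test function here is made up for illustration; only the mark definition mirrors the diff.

import re
import warnings

import pytest

# Defined once, applied wherever torch.jit.script is exercised.
ignore_jit_warning_no_profile = pytest.mark.filterwarnings(
    f"ignore:{re.escape('operator() profile_node %')}:UserWarning"
)

@ignore_jit_warning_no_profile
def test_scripted_example():
    # Hypothetical test body standing in for a scripting test that would
    # otherwise emit the profile_node warning.
    warnings.warn("operator() profile_node %1 : int[] = prim::profile_ivalue(%2) does not have profile information", UserWarning)

pytest would also accept this file-wide via `pytestmark = ignore_jit_warning_no_profile` or globally through a `filterwarnings` ini entry; the per-test decorator keeps the suppression scoped to the scripting tests, so other tests still surface the warning.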