From 1eba14f058e939129e7fec8e8a3c1d33d403cf56 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Tue, 25 Oct 2022 11:48:44 +0200 Subject: [PATCH 1/2] ignore jit warning 'globally' --- test/prototype_transforms_kernel_infos.py | 29 ++++++-------------- test/test_prototype_transforms_functional.py | 12 ++++++++ 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/test/prototype_transforms_kernel_infos.py b/test/prototype_transforms_kernel_infos.py index 587deb3416f..c417b33c2a3 100644 --- a/test/prototype_transforms_kernel_infos.py +++ b/test/prototype_transforms_kernel_infos.py @@ -2,7 +2,6 @@ import functools import itertools import math -import re import numpy as np import pytest @@ -159,12 +158,6 @@ def sample_inputs_horizontal_flip_video(): KernelInfo( F.horizontal_flip_bounding_box, sample_inputs_fn=sample_inputs_horizontal_flip_bounding_box, - test_marks=[ - TestMark( - ("TestKernels", "test_scripted_vs_eager"), - pytest.mark.filterwarnings(f"ignore:{re.escape('operator() profile_node %72')}:UserWarning"), - ) - ], ), KernelInfo( F.horizontal_flip_mask, @@ -2045,17 +2038,11 @@ def sample_inputs_convert_dtype_video(): yield ArgsKwargs(video_loader) -_common_convert_dtype_marks = [ - TestMark( - ("TestKernels", "test_dtype_and_device_consistency"), - pytest.mark.skip(reason="`convert_dtype_*` kernels convert the dtype by design"), - condition=lambda args_kwargs: args_kwargs.args[0].dtype != args_kwargs.kwargs.get("dtype", torch.float32), - ), - TestMark( - ("TestKernels", "test_scripted_vs_eager"), - pytest.mark.filterwarnings(f"ignore:{re.escape('operator() profile_node %')}:UserWarning"), - ), -] +skip_dtype_consistency = TestMark( + ("TestKernels", "test_dtype_and_device_consistency"), + pytest.mark.skip(reason="`convert_dtype_*` kernels convert the dtype by design"), + condition=lambda args_kwargs: args_kwargs.args[0].dtype != args_kwargs.kwargs.get("dtype", torch.float32), +) KERNEL_INFOS.extend( [ @@ -2065,7 +2052,7 @@ def 
sample_inputs_convert_dtype_video(): reference_fn=reference_convert_dtype_image_tensor, reference_inputs_fn=reference_inputs_convert_dtype_image_tensor, test_marks=[ - *_common_convert_dtype_marks, + skip_dtype_consistency, TestMark( ("TestKernels", "test_against_reference"), pytest.mark.xfail(reason="Conversion overflows"), @@ -2083,7 +2070,9 @@ def sample_inputs_convert_dtype_video(): KernelInfo( F.convert_dtype_video, sample_inputs_fn=sample_inputs_convert_dtype_video, - test_marks=_common_convert_dtype_marks, + test_marks=[ + skip_dtype_consistency, + ], ), ] ) diff --git a/test/test_prototype_transforms_functional.py b/test/test_prototype_transforms_functional.py index 0ddfcf1b3e6..964429c0000 100644 --- a/test/test_prototype_transforms_functional.py +++ b/test/test_prototype_transforms_functional.py @@ -1,5 +1,6 @@ import math import os +import re import numpy as np import PIL.Image @@ -26,6 +27,14 @@ def script(fn): raise AssertionError(f"Trying to `torch.jit.script` '{fn.__name__}' raised the error above.") from error +# Scripting a function often triggers +# UserWarning: operator() profile_node %373 : int[] = prim::profile_ivalue(%371) does not have profile information +# with varying numbers. Since these are uninteresting for us and only clutter the test summary, we ignore them. 
+ignore_jit_warning_no_profile = pytest.mark.filterwarnings( + f"ignore:{re.escape('operator() profile_node %')}:UserWarning" +) + + def make_info_args_kwargs_params(info, *, args_kwargs_fn, test_id=None): args_kwargs = list(args_kwargs_fn(info)) idx_field_len = len(str(len(args_kwargs))) @@ -87,6 +96,7 @@ class TestKernels: condition=lambda info: info.reference_fn is not None, ) + @ignore_jit_warning_no_profile @sample_inputs @pytest.mark.parametrize("device", cpu_and_gpu()) def test_scripted_vs_eager(self, info, args_kwargs, device): @@ -218,6 +228,7 @@ class TestDispatchers: condition=lambda info: features.Image in info.kernels, ) + @ignore_jit_warning_no_profile @image_sample_inputs @pytest.mark.parametrize("device", cpu_and_gpu()) def test_scripted_smoke(self, info, args_kwargs, device): @@ -230,6 +241,7 @@ def test_scripted_smoke(self, info, args_kwargs, device): # TODO: We need this until the dispatchers below also have `DispatcherInfo`'s. If they do, `test_scripted_smoke` # replaces this test for them. + @ignore_jit_warning_no_profile @pytest.mark.parametrize( "dispatcher", [ From 8279b4f5148b269422dffe7afa05048048e2a4fb Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Tue, 25 Oct 2022 12:33:28 +0200 Subject: [PATCH 2/2] improve comment --- test/test_prototype_transforms_functional.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/test/test_prototype_transforms_functional.py b/test/test_prototype_transforms_functional.py index 964429c0000..c739598a169 100644 --- a/test/test_prototype_transforms_functional.py +++ b/test/test_prototype_transforms_functional.py @@ -27,9 +27,10 @@ def script(fn): raise AssertionError(f"Trying to `torch.jit.script` '{fn.__name__}' raised the error above.") from error -# Scripting a function often triggers -# UserWarning: operator() profile_node %373 : int[] = prim::profile_ivalue(%371) does not have profile information -# with varying numbers. 
Since these are uninteresting for us and only clutter the test summary, we ignore them. +# Scripting a function often triggers a warning like +# `UserWarning: operator() profile_node %$INT1 : int[] = prim::profile_ivalue($INT2) does not have profile information` +# with varying `INT1` and `INT2`. Since these are uninteresting for us and only clutter the test summary, we ignore +# them. ignore_jit_warning_no_profile = pytest.mark.filterwarnings( f"ignore:{re.escape('operator() profile_node %')}:UserWarning" )