From 555d44a3c802dad010151b94b3385193427997a3 Mon Sep 17 00:00:00 2001
From: Philip Meier
Date: Mon, 11 Sep 2023 23:07:27 +0200
Subject: [PATCH] cleanup

---
 test/test_transforms_v2_consistency.py |  50 ----
 test/transforms_v2_dispatcher_infos.py |  64 -----
 test/transforms_v2_kernel_infos.py     | 378 -------------------------
 3 files changed, 492 deletions(-)

diff --git a/test/test_transforms_v2_consistency.py b/test/test_transforms_v2_consistency.py
index 010198dd41a..c2fe5723f69 100644
--- a/test/test_transforms_v2_consistency.py
+++ b/test/test_transforms_v2_consistency.py
@@ -149,48 +149,6 @@ def __init__(
         # images given that the transform does nothing but call it anyway.
         supports_pil=False,
     ),
-    ConsistencyConfig(
-        v2_transforms.RandomPosterize,
-        legacy_transforms.RandomPosterize,
-        [
-            ArgsKwargs(p=0, bits=5),
-            ArgsKwargs(p=1, bits=1),
-            ArgsKwargs(p=1, bits=3),
-        ],
-        make_images_kwargs=dict(DEFAULT_MAKE_IMAGES_KWARGS, dtypes=[torch.uint8]),
-    ),
-    ConsistencyConfig(
-        v2_transforms.RandomSolarize,
-        legacy_transforms.RandomSolarize,
-        [
-            ArgsKwargs(p=0, threshold=0.5),
-            ArgsKwargs(p=1, threshold=0.3),
-            ArgsKwargs(p=1, threshold=0.99),
-        ],
-    ),
-    *[
-        ConsistencyConfig(
-            v2_transforms.RandomAutocontrast,
-            legacy_transforms.RandomAutocontrast,
-            [
-                ArgsKwargs(p=0),
-                ArgsKwargs(p=1),
-            ],
-            make_images_kwargs=dict(DEFAULT_MAKE_IMAGES_KWARGS, dtypes=[dt]),
-            closeness_kwargs=ckw,
-        )
-        for dt, ckw in [(torch.uint8, dict(atol=1, rtol=0)), (torch.float32, dict(rtol=None, atol=None))]
-    ],
-    ConsistencyConfig(
-        v2_transforms.RandomAdjustSharpness,
-        legacy_transforms.RandomAdjustSharpness,
-        [
-            ArgsKwargs(p=0, sharpness_factor=0.5),
-            ArgsKwargs(p=1, sharpness_factor=0.2),
-            ArgsKwargs(p=1, sharpness_factor=0.99),
-        ],
-        closeness_kwargs={"atol": 1e-6, "rtol": 1e-6},
-    ),
     ConsistencyConfig(
         v2_transforms.RandomGrayscale,
         legacy_transforms.RandomGrayscale,
@@ -794,17 +752,9 @@ def test_common(self, t_ref, t, data_kwargs):
         (legacy_F.to_pil_image, {}),
         (legacy_F.five_crop, {}),
         (legacy_F.ten_crop, {}),
-        (legacy_F.adjust_contrast, {}),
-        (legacy_F.adjust_saturation, {}),
-        (legacy_F.adjust_hue, {}),
-        (legacy_F.adjust_gamma, {}),
         (legacy_F.to_grayscale, {}),
         (legacy_F.rgb_to_grayscale, {}),
         (legacy_F.to_tensor, {}),
-        (legacy_F.posterize, {}),
-        (legacy_F.solarize, {}),
-        (legacy_F.adjust_sharpness, {}),
-        (legacy_F.autocontrast, {}),
     ],
 )
 def test_dispatcher_signature_consistency(legacy_dispatcher, name_only_params):
diff --git a/test/transforms_v2_dispatcher_infos.py b/test/transforms_v2_dispatcher_infos.py
index a84923b620e..af5f48b148b 100644
--- a/test/transforms_v2_dispatcher_infos.py
+++ b/test/transforms_v2_dispatcher_infos.py
@@ -111,70 +111,6 @@ def xfail_jit_python_scalar_arg(name, *, reason=None):
 
 
 DISPATCHER_INFOS = [
-    DispatcherInfo(
-        F.posterize,
-        kernels={
-            tv_tensors.Image: F.posterize_image,
-            tv_tensors.Video: F.posterize_video,
-        },
-        pil_kernel_info=PILKernelInfo(F._posterize_image_pil, kernel_name="posterize_image_pil"),
-    ),
-    DispatcherInfo(
-        F.solarize,
-        kernels={
-            tv_tensors.Image: F.solarize_image,
-            tv_tensors.Video: F.solarize_video,
-        },
-        pil_kernel_info=PILKernelInfo(F._solarize_image_pil, kernel_name="solarize_image_pil"),
-    ),
-    DispatcherInfo(
-        F.autocontrast,
-        kernels={
-            tv_tensors.Image: F.autocontrast_image,
-            tv_tensors.Video: F.autocontrast_video,
-        },
-        pil_kernel_info=PILKernelInfo(F._autocontrast_image_pil, kernel_name="autocontrast_image_pil"),
-    ),
-    DispatcherInfo(
-        F.adjust_sharpness,
-        kernels={
-            tv_tensors.Image: F.adjust_sharpness_image,
-            tv_tensors.Video: F.adjust_sharpness_video,
-        },
-        pil_kernel_info=PILKernelInfo(F._adjust_sharpness_image_pil, kernel_name="adjust_sharpness_image_pil"),
-    ),
-    DispatcherInfo(
-        F.adjust_contrast,
-        kernels={
-            tv_tensors.Image: F.adjust_contrast_image,
-            tv_tensors.Video: F.adjust_contrast_video,
-        },
-        pil_kernel_info=PILKernelInfo(F._adjust_contrast_image_pil, kernel_name="adjust_contrast_image_pil"),
-    ),
-    DispatcherInfo(
-        F.adjust_gamma,
-        kernels={
-            tv_tensors.Image: F.adjust_gamma_image,
-            tv_tensors.Video: F.adjust_gamma_video,
-        },
-        pil_kernel_info=PILKernelInfo(F._adjust_gamma_image_pil, kernel_name="adjust_gamma_image_pil"),
-    ),
-    DispatcherInfo(
-        F.adjust_hue,
-        kernels={
-            tv_tensors.Image: F.adjust_hue_image,
-            tv_tensors.Video: F.adjust_hue_video,
-        },
-        pil_kernel_info=PILKernelInfo(F._adjust_hue_image_pil, kernel_name="adjust_hue_image_pil"),
-    ),
-    DispatcherInfo(
-        F.adjust_saturation,
-        kernels={
-            tv_tensors.Image: F.adjust_saturation_image,
-            tv_tensors.Video: F.adjust_saturation_video,
-        },
-        pil_kernel_info=PILKernelInfo(F._adjust_saturation_image_pil, kernel_name="adjust_saturation_image_pil"),
-    ),
     DispatcherInfo(
         F.five_crop,
         kernels={
diff --git a/test/transforms_v2_kernel_infos.py b/test/transforms_v2_kernel_infos.py
index 8371e60ffd9..c97ef48e707 100644
--- a/test/transforms_v2_kernel_infos.py
+++ b/test/transforms_v2_kernel_infos.py
@@ -5,10 +5,8 @@
 import pytest
 import torch.testing
 import torchvision.transforms.v2.functional as F
-from torchvision.transforms._functional_tensor import _max_value as get_max_value
 from transforms_v2_legacy_utils import (
     ArgsKwargs,
-    DEFAULT_PORTRAIT_SPATIAL_SIZE,
     InfoBase,
     make_image_loaders,
     make_video_loaders,
@@ -64,35 +62,6 @@ def __init__(
         self.logs_usage = logs_usage
 
 
-def pixel_difference_closeness_kwargs(uint8_atol, *, dtype=torch.uint8, mae=False):
-    return dict(atol=uint8_atol / 255 * get_max_value(dtype), rtol=0, mae=mae)
-
-
-def cuda_vs_cpu_pixel_difference(atol=1):
-    return {
-        (("TestKernels", "test_cuda_vs_cpu"), dtype, "cuda"): pixel_difference_closeness_kwargs(atol, dtype=dtype)
-        for dtype in [torch.uint8, torch.float32]
-    }
-
-
-def pil_reference_pixel_difference(atol=1, mae=False):
-    return {
-        (("TestKernels", "test_against_reference"), torch.uint8, "cpu"): pixel_difference_closeness_kwargs(
-            atol, mae=mae
-        )
-    }
-
-
-def float32_vs_uint8_pixel_difference(atol=1, mae=False):
-    return {
-        (
-            ("TestKernels", "test_float32_vs_uint8"),
-            torch.float32,
-            "cpu",
-        ): pixel_difference_closeness_kwargs(atol, dtype=torch.float32, mae=mae)
-    }
-
-
 def pil_reference_wrapper(pil_kernel):
     @functools.wraps(pil_kernel)
     def wrapper(input_tensor, *other_args, **kwargs):
@@ -135,353 +104,6 @@ def xfail_jit_python_scalar_arg(name, *, reason=None):
 KERNEL_INFOS = []
 
 
-_POSTERIZE_BITS = [1, 4, 8]
-
-
-def sample_inputs_posterize_image_tensor():
-    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
-        yield ArgsKwargs(image_loader, bits=_POSTERIZE_BITS[0])
-
-
-def reference_inputs_posterize_image_tensor():
-    for image_loader, bits in itertools.product(
-        make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]),
-        _POSTERIZE_BITS,
-    ):
-        yield ArgsKwargs(image_loader, bits=bits)
-
-
-def sample_inputs_posterize_video():
-    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
-        yield ArgsKwargs(video_loader, bits=_POSTERIZE_BITS[0])
-
-
-KERNEL_INFOS.extend(
-    [
-        KernelInfo(
-            F.posterize_image,
-            kernel_name="posterize_image_tensor",
-            sample_inputs_fn=sample_inputs_posterize_image_tensor,
-            reference_fn=pil_reference_wrapper(F._posterize_image_pil),
-            reference_inputs_fn=reference_inputs_posterize_image_tensor,
-            float32_vs_uint8=True,
-            closeness_kwargs=float32_vs_uint8_pixel_difference(),
-        ),
-        KernelInfo(
-            F.posterize_video,
-            sample_inputs_fn=sample_inputs_posterize_video,
-        ),
-    ]
-)
-
-
-def _get_solarize_thresholds(dtype):
-    for factor in [0.1, 0.5]:
-        max_value = get_max_value(dtype)
-        yield (float if dtype.is_floating_point else int)(max_value * factor)
-
-
-def sample_inputs_solarize_image_tensor():
-    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
-        yield ArgsKwargs(image_loader, threshold=next(_get_solarize_thresholds(image_loader.dtype)))
-
-
-def reference_inputs_solarize_image_tensor():
-    for image_loader in make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]):
-        for threshold in _get_solarize_thresholds(image_loader.dtype):
-            yield ArgsKwargs(image_loader, threshold=threshold)
-
-
-def uint8_to_float32_threshold_adapter(other_args, kwargs):
-    return other_args, dict(threshold=kwargs["threshold"] / 255)
-
-
-def sample_inputs_solarize_video():
-    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
-        yield ArgsKwargs(video_loader, threshold=next(_get_solarize_thresholds(video_loader.dtype)))
-
-
-KERNEL_INFOS.extend(
-    [
-        KernelInfo(
-            F.solarize_image,
-            kernel_name="solarize_image_tensor",
-            sample_inputs_fn=sample_inputs_solarize_image_tensor,
-            reference_fn=pil_reference_wrapper(F._solarize_image_pil),
-            reference_inputs_fn=reference_inputs_solarize_image_tensor,
-            float32_vs_uint8=uint8_to_float32_threshold_adapter,
-            closeness_kwargs=float32_vs_uint8_pixel_difference(),
-        ),
-        KernelInfo(
-            F.solarize_video,
-            sample_inputs_fn=sample_inputs_solarize_video,
-        ),
-    ]
-)
-
-
-def sample_inputs_autocontrast_image_tensor():
-    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
-        yield ArgsKwargs(image_loader)
-
-
-def reference_inputs_autocontrast_image_tensor():
-    for image_loader in make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]):
-        yield ArgsKwargs(image_loader)
-
-
-def sample_inputs_autocontrast_video():
-    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
-        yield ArgsKwargs(video_loader)
-
-
-KERNEL_INFOS.extend(
-    [
-        KernelInfo(
-            F.autocontrast_image,
-            kernel_name="autocontrast_image_tensor",
-            sample_inputs_fn=sample_inputs_autocontrast_image_tensor,
-            reference_fn=pil_reference_wrapper(F._autocontrast_image_pil),
-            reference_inputs_fn=reference_inputs_autocontrast_image_tensor,
-            float32_vs_uint8=True,
-            closeness_kwargs={
-                **pil_reference_pixel_difference(),
-                **float32_vs_uint8_pixel_difference(),
-            },
-        ),
-        KernelInfo(
-            F.autocontrast_video,
-            sample_inputs_fn=sample_inputs_autocontrast_video,
-        ),
-    ]
-)
-
-_ADJUST_SHARPNESS_FACTORS = [0.1, 0.5]
-
-
-def sample_inputs_adjust_sharpness_image_tensor():
-    for image_loader in make_image_loaders(
-        sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE, (2, 2)],
-        color_spaces=("GRAY", "RGB"),
-    ):
-        yield ArgsKwargs(image_loader, sharpness_factor=_ADJUST_SHARPNESS_FACTORS[0])
-
-
-def reference_inputs_adjust_sharpness_image_tensor():
-    for image_loader, sharpness_factor in itertools.product(
-        make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]),
-        _ADJUST_SHARPNESS_FACTORS,
-    ):
-        yield ArgsKwargs(image_loader, sharpness_factor=sharpness_factor)
-
-
-def sample_inputs_adjust_sharpness_video():
-    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
-        yield ArgsKwargs(video_loader, sharpness_factor=_ADJUST_SHARPNESS_FACTORS[0])
-
-
-KERNEL_INFOS.extend(
-    [
-        KernelInfo(
-            F.adjust_sharpness_image,
-            kernel_name="adjust_sharpness_image_tensor",
-            sample_inputs_fn=sample_inputs_adjust_sharpness_image_tensor,
-            reference_fn=pil_reference_wrapper(F._adjust_sharpness_image_pil),
-            reference_inputs_fn=reference_inputs_adjust_sharpness_image_tensor,
-            float32_vs_uint8=True,
-            closeness_kwargs=float32_vs_uint8_pixel_difference(2),
-        ),
-        KernelInfo(
-            F.adjust_sharpness_video,
-            sample_inputs_fn=sample_inputs_adjust_sharpness_video,
-        ),
-    ]
-)
-
-
-_ADJUST_CONTRAST_FACTORS = [0.1, 0.5]
-
-
-def sample_inputs_adjust_contrast_image_tensor():
-    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
-        yield ArgsKwargs(image_loader, contrast_factor=_ADJUST_CONTRAST_FACTORS[0])
-
-
-def reference_inputs_adjust_contrast_image_tensor():
-    for image_loader, contrast_factor in itertools.product(
-        make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]),
-        _ADJUST_CONTRAST_FACTORS,
-    ):
-        yield ArgsKwargs(image_loader, contrast_factor=contrast_factor)
-
-
-def sample_inputs_adjust_contrast_video():
-    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
-        yield ArgsKwargs(video_loader, contrast_factor=_ADJUST_CONTRAST_FACTORS[0])
-
-
-KERNEL_INFOS.extend(
-    [
-        KernelInfo(
-            F.adjust_contrast_image,
-            kernel_name="adjust_contrast_image_tensor",
-            sample_inputs_fn=sample_inputs_adjust_contrast_image_tensor,
-            reference_fn=pil_reference_wrapper(F._adjust_contrast_image_pil),
-            reference_inputs_fn=reference_inputs_adjust_contrast_image_tensor,
-            float32_vs_uint8=True,
-            closeness_kwargs={
-                **pil_reference_pixel_difference(),
-                **float32_vs_uint8_pixel_difference(2),
-                **cuda_vs_cpu_pixel_difference(),
-                (("TestKernels", "test_against_reference"), torch.uint8, "cpu"): pixel_difference_closeness_kwargs(1),
-            },
-        ),
-        KernelInfo(
-            F.adjust_contrast_video,
-            sample_inputs_fn=sample_inputs_adjust_contrast_video,
-            closeness_kwargs={
-                **cuda_vs_cpu_pixel_difference(),
-                (("TestKernels", "test_against_reference"), torch.uint8, "cpu"): pixel_difference_closeness_kwargs(1),
-            },
-        ),
-    ]
-)
-
-_ADJUST_GAMMA_GAMMAS_GAINS = [
-    (0.5, 2.0),
-    (0.0, 1.0),
-]
-
-
-def sample_inputs_adjust_gamma_image_tensor():
-    gamma, gain = _ADJUST_GAMMA_GAMMAS_GAINS[0]
-    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
-        yield ArgsKwargs(image_loader, gamma=gamma, gain=gain)
-
-
-def reference_inputs_adjust_gamma_image_tensor():
-    for image_loader, (gamma, gain) in itertools.product(
-        make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]),
-        _ADJUST_GAMMA_GAMMAS_GAINS,
-    ):
-        yield ArgsKwargs(image_loader, gamma=gamma, gain=gain)
-
-
-def sample_inputs_adjust_gamma_video():
-    gamma, gain = _ADJUST_GAMMA_GAMMAS_GAINS[0]
-    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
-        yield ArgsKwargs(video_loader, gamma=gamma, gain=gain)
-
-
-KERNEL_INFOS.extend(
-    [
-        KernelInfo(
-            F.adjust_gamma_image,
-            kernel_name="adjust_gamma_image_tensor",
-            sample_inputs_fn=sample_inputs_adjust_gamma_image_tensor,
-            reference_fn=pil_reference_wrapper(F._adjust_gamma_image_pil),
-            reference_inputs_fn=reference_inputs_adjust_gamma_image_tensor,
-            float32_vs_uint8=True,
-            closeness_kwargs={
-                **pil_reference_pixel_difference(),
-                **float32_vs_uint8_pixel_difference(),
-            },
-        ),
-        KernelInfo(
-            F.adjust_gamma_video,
-            sample_inputs_fn=sample_inputs_adjust_gamma_video,
-        ),
-    ]
-)
-
-
-_ADJUST_HUE_FACTORS = [-0.1, 0.5]
-
-
-def sample_inputs_adjust_hue_image_tensor():
-    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
-        yield ArgsKwargs(image_loader, hue_factor=_ADJUST_HUE_FACTORS[0])
-
-
-def reference_inputs_adjust_hue_image_tensor():
-    for image_loader, hue_factor in itertools.product(
-        make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]),
-        _ADJUST_HUE_FACTORS,
-    ):
-        yield ArgsKwargs(image_loader, hue_factor=hue_factor)
-
-
-def sample_inputs_adjust_hue_video():
-    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
-        yield ArgsKwargs(video_loader, hue_factor=_ADJUST_HUE_FACTORS[0])
-
-
-KERNEL_INFOS.extend(
-    [
-        KernelInfo(
-            F.adjust_hue_image,
-            kernel_name="adjust_hue_image_tensor",
-            sample_inputs_fn=sample_inputs_adjust_hue_image_tensor,
-            reference_fn=pil_reference_wrapper(F._adjust_hue_image_pil),
-            reference_inputs_fn=reference_inputs_adjust_hue_image_tensor,
-            float32_vs_uint8=True,
-            closeness_kwargs={
-                **pil_reference_pixel_difference(2, mae=True),
-                **float32_vs_uint8_pixel_difference(),
-            },
-        ),
-        KernelInfo(
-            F.adjust_hue_video,
-            sample_inputs_fn=sample_inputs_adjust_hue_video,
-        ),
-    ]
-)
-
-_ADJUST_SATURATION_FACTORS = [0.1, 0.5]
-
-
-def sample_inputs_adjust_saturation_image_tensor():
-    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
-        yield ArgsKwargs(image_loader, saturation_factor=_ADJUST_SATURATION_FACTORS[0])
-
-
-def reference_inputs_adjust_saturation_image_tensor():
-    for image_loader, saturation_factor in itertools.product(
-        make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]),
-        _ADJUST_SATURATION_FACTORS,
-    ):
-        yield ArgsKwargs(image_loader, saturation_factor=saturation_factor)
-
-
-def sample_inputs_adjust_saturation_video():
-    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
-        yield ArgsKwargs(video_loader, saturation_factor=_ADJUST_SATURATION_FACTORS[0])
-
-
-KERNEL_INFOS.extend(
-    [
-        KernelInfo(
-            F.adjust_saturation_image,
-            kernel_name="adjust_saturation_image_tensor",
-            sample_inputs_fn=sample_inputs_adjust_saturation_image_tensor,
-            reference_fn=pil_reference_wrapper(F._adjust_saturation_image_pil),
-            reference_inputs_fn=reference_inputs_adjust_saturation_image_tensor,
-            float32_vs_uint8=True,
-            closeness_kwargs={
-                **pil_reference_pixel_difference(),
-                **float32_vs_uint8_pixel_difference(2),
-                **cuda_vs_cpu_pixel_difference(),
-            },
-        ),
-        KernelInfo(
-            F.adjust_saturation_video,
-            sample_inputs_fn=sample_inputs_adjust_saturation_video,
-            closeness_kwargs=cuda_vs_cpu_pixel_difference(),
-        ),
-    ]
-)
-
-
 _FIVE_TEN_CROP_SIZES = [7, (6,), [5], (6, 5), [7, 6]]