Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added antialias option to transforms.functional.resize #3761

Merged
merged 11 commits into from
May 10, 2021
8 changes: 7 additions & 1 deletion ios/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.4.1)
cmake_minimum_required(VERSION 3.6)
set(TARGET torchvision_ops)
project(${TARGET} CXX)
set(CMAKE_CXX_STANDARD 14)
Expand All @@ -11,6 +11,12 @@ file(GLOB VISION_SRCS
../torchvision/csrc/ops/*.h
../torchvision/csrc/ops/*.cpp)

# Remove interpolate_aa sources as they are temporary code
# see https://github.com/pytorch/vision/pull/3761
# and using TensorIterator unavailable with iOS
# FILTER was added in CMake>=3.6 => 3.4.1 -> 3.6
list(FILTER VISION_SRCS EXCLUDE REGEX ".+(interpolate_aa).+")

add_library(${TARGET} STATIC
${VISION_SRCS}
)
Expand Down
49 changes: 49 additions & 0 deletions test/test_functional_tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -472,6 +472,55 @@ def test_resize(self):
with self.assertRaisesRegex(ValueError, "max_size = 32 must be strictly greater"):
F.resize(img, size=32, max_size=32)

def test_resize_antialias(self):
    """Check tensor F.resize with antialias=True against the PIL reference.

    PIL always anti-aliases when downscaling, so the tensor code path with
    ``antialias=True`` should produce output close to the PIL output, for
    several target sizes and dtypes.  Also checks that the scripted
    ``F.resize`` agrees exactly with the eager call.
    """
    if self.device == "cuda":
        self.skipTest("Not implemented for CUDA device")

    script_fn = torch.jit.script(F.resize)
    base_tensor, pil_img = self._create_data(320, 290, device=self.device)

    for dt in [None, torch.float32, torch.float64, torch.float16]:

        if dt == torch.float16 and torch.device(self.device).type == "cpu":
            # skip float16 on CPU case
            continue

        if dt is None:
            tensor = base_tensor
        else:
            # Trivial cast to float of the ORIGINAL uint8 data, so that each
            # dtype case is exercised from the same source.  (Previously the
            # already-cast tensor was re-cast on every iteration, so e.g. the
            # float64 case actually tested a float32->float64 cast.)
            tensor = base_tensor.to(dt)

        for size in [[96, 72], [96, 420], [420, 72]]:
            for interpolation in [BILINEAR, ]:
                resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, antialias=True)
                resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation)

                # Spatial size must match PIL's (PIL .size is (w, h), tensor is (c, h, w)).
                self.assertEqual(
                    resized_tensor.size()[1:], resized_pil_img.size[::-1],
                    msg=f"{size}, {interpolation}, {dt}"
                )

                resized_tensor_f = resized_tensor
                # we need to cast uint8 to float to compare with the PIL image
                if resized_tensor_f.dtype == torch.uint8:
                    resized_tensor_f = resized_tensor_f.to(torch.float)

                # Mean abs difference within 0.5 and max abs difference
                # within ~1.0 of the PIL reference.
                self.approxEqualTensorToPIL(
                    resized_tensor_f, resized_pil_img, tol=0.5, msg=f"{size}, {interpolation}, {dt}"
                )
                self.approxEqualTensorToPIL(
                    resized_tensor_f, resized_pil_img, tol=1.0 + 1e-5, agg_method="max",
                    msg=f"{size}, {interpolation}, {dt}"
                )

                # torch.jit.script requires a List[int] size; normalize here
                # (defensive: sizes above are already lists).
                if isinstance(size, int):
                    script_size = [size, ]
                else:
                    script_size = size

                # Scripted path must agree exactly with the eager path.
                resize_result = script_fn(tensor, size=script_size, interpolation=interpolation, antialias=True)
                self.assertTrue(resized_tensor.equal(resize_result), msg=f"{size}, {interpolation}, {dt}")

def test_resized_crop(self):
# test values of F.resized_crop in several cases:
# 1) resize to the same size, crop to the same size => should be identity
Expand Down
4 changes: 4 additions & 0 deletions test/test_transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -348,6 +348,10 @@ def test_resize(self):

self.assertEqual((owidth, oheight), result.size)

with self.assertWarnsRegex(UserWarning, r"Anti-alias option is always applied for PIL Image input"):
t = transforms.Resize(osize, antialias=False)
t(img)

def test_random_crop(self):
height = random.randint(10, 32) * 2
width = random.randint(10, 32) * 2
Expand Down
Loading