Fixing the upper bound limit of random pixels in tests to 256. (#3136)
Reviewed By: datumbox

Differential Revision: D25396708

fbshipit-source-id: 7aa27cd76f135c0ccca6b8412fec9388b3df0919
fmassa authored and facebook-github-bot committed Dec 8, 2020
1 parent bfbac92 commit 3d42d7b
Showing 4 changed files with 13 additions and 13 deletions.
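
The change itself is a one-character off-by-one fix: torch.randint(low, high, ...) samples from the half-open interval [low, high), so the previous upper bound of 255 could never produce the maximum uint8 pixel value 255. A minimal standalone sketch of the issue (not part of the changed test files; the sample size here is arbitrary):

import torch

# torch.randint draws from [low, high), i.e. the upper bound is exclusive.
old = torch.randint(0, 255, size=(10_000,), dtype=torch.uint8)  # values 0..254 only
new = torch.randint(0, 256, size=(10_000,), dtype=torch.uint8)  # full uint8 range 0..255

print(int(old.max()))  # at most 254
print(int(new.max()))  # can reach 255

With 256 as the exclusive upper bound, the fake images and video frames generated in these tests can exercise the full 0-255 uint8 range.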
test/fakedata_generation.py (2 changes: 1 addition & 1 deletion)
@@ -21,7 +21,7 @@ def _encode(v):
         return torch.tensor(v, dtype=torch.int32).numpy().tobytes()[::-1]

     def _make_image_file(filename, num_images):
-        img = torch.randint(0, 255, size=(28 * 28 * num_images,), dtype=torch.uint8)
+        img = torch.randint(0, 256, size=(28 * 28 * num_images,), dtype=torch.uint8)
         with open(filename, "wb") as f:
             f.write(_encode(2051))  # magic header
             f.write(_encode(num_images))
test/test_datasets_samplers.py (2 changes: 1 addition & 1 deletion)
@@ -29,7 +29,7 @@ def get_list_of_videos(num_videos=5, sizes=None, fps=None):
                 f = 5
             else:
                 f = fps[i]
-            data = torch.randint(0, 255, (size, 300, 400, 3), dtype=torch.uint8)
+            data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8)
             name = os.path.join(tmp_dir, "{}.mp4".format(i))
             names.append(name)
             io.write_video(name, data, fps=f)
test/test_datasets_video_utils.py (2 changes: 1 addition & 1 deletion)
@@ -22,7 +22,7 @@ def get_list_of_videos(num_videos=5, sizes=None, fps=None):
                 f = 5
             else:
                 f = fps[i]
-            data = torch.randint(0, 255, (size, 300, 400, 3), dtype=torch.uint8)
+            data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8)
             name = os.path.join(tmp_dir, "{}.mp4".format(i))
             names.append(name)
             io.write_video(name, data, fps=f)
test/test_transforms_tensor.py (20 changes: 10 additions & 10 deletions)
@@ -180,7 +180,7 @@ def test_center_crop(self):
         self._test_op(
             "center_crop", "CenterCrop", fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
         )
-        tensor = torch.randint(0, 255, (3, 10, 10), dtype=torch.uint8, device=self.device)
+        tensor = torch.randint(0, 256, (3, 10, 10), dtype=torch.uint8, device=self.device)
         # Test torchscript of transforms.CenterCrop with size as int
         f = T.CenterCrop(size=5)
         scripted_fn = torch.jit.script(f)
@@ -294,7 +294,7 @@ def test_resize(self):
         self.assertEqual(y.shape[2], int(38 * 46 / 32))

         tensor, _ = self._create_data(height=34, width=36, device=self.device)
-        batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
+        batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
         script_fn = torch.jit.script(F.resize)

         for dt in [None, torch.float32, torch.float64]:
@@ -323,8 +323,8 @@ def test_resize(self):
             script_fn.save(os.path.join(tmp_dir, "t_resize.pt"))

     def test_resized_crop(self):
-        tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
-        batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
+        tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
+        batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)

         for scale in [(0.7, 1.2), [0.7, 1.2]]:
             for ratio in [(0.75, 1.333), [0.75, 1.333]]:
@@ -341,8 +341,8 @@ def test_resized_crop(self):
             s_transform.save(os.path.join(tmp_dir, "t_resized_crop.pt"))

     def test_random_affine(self):
-        tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
-        batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
+        tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
+        batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)

         for shear in [15, 10.0, (5.0, 10.0), [-15, 15], [-10.0, 10.0, -11.0, 11.0]]:
             for scale in [(0.7, 1.2), [0.7, 1.2]]:
@@ -363,8 +363,8 @@ def test_random_affine(self):
             s_transform.save(os.path.join(tmp_dir, "t_random_affine.pt"))

     def test_random_rotate(self):
-        tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
-        batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
+        tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
+        batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)

         for center in [(0, 0), [10, 10], None, (56, 44)]:
             for expand in [True, False]:
@@ -383,8 +383,8 @@ def test_random_rotate(self):
             s_transform.save(os.path.join(tmp_dir, "t_random_rotate.pt"))

     def test_random_perspective(self):
-        tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
-        batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
+        tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
+        batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)

         for distortion_scale in np.linspace(0.1, 1.0, num=20):
             for interpolation in [NEAREST, BILINEAR]:
