Commit 43f9714
upgrade old idioms with pyupgrade
ylep committed Jul 2, 2024
1 parent d7d19d0 commit 43f9714
Showing 24 changed files with 105 additions and 114 deletions.
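pyupgrade mechanically rewrites idioms that predate the project's minimum Python version: str.format() calls become f-strings, redundant file-mode letters and coding declarations are dropped, and bytes() becomes the literal b"". A minimal sketch of the central rewrite, with an illustrative filename (not code from this repository):

name = "colin27T1_seg.nii.gz"          # illustrative value
old_style = "Reading {}".format(name)  # pre-3.6 idiom, flagged by pyupgrade
new_style = f"Reading {name}"          # the f-string replacement (Python 3.6+)
assert old_style == new_style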
1 change: 0 additions & 1 deletion docs/conf.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# neuroglancer-scripts documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 2 15:05:24 2018.
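The deleted # -*- coding: utf-8 -*- cookie is redundant: since PEP 3120, UTF-8 is the default source encoding in Python 3. A minimal sketch, assuming a Python 3 interpreter (illustrative file, not part of the commit):

# No coding declaration needed; UTF-8 source text is the Python 3 default.
caption = "résolution en µm"  # non-ASCII literal parses without a cookie
print(caption)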
10 changes: 5 additions & 5 deletions experimental/mesh_to_vtk.py
@@ -15,7 +15,7 @@
def mesh_file_to_vtk(input_filename, output_filename, data_format="ascii",
coord_transform=None):
"""Convert a mesh file read by nibabel to VTK format"""
print("Reading {}".format(input_filename))
print(f"Reading {input_filename}")
mesh = nibabel.load(input_filename)
print()
print("Summary")
@@ -44,7 +44,7 @@ def mesh_file_to_vtk(input_filename, output_filename, data_format="ascii",
# Gifti uses millimetres, Neuroglancer expects nanometres
points *= 1e6

with open(output_filename, "wt") as output_file:
with open(output_filename, "w") as output_file:
neuroglancer_scripts.mesh.save_mesh_as_neuroglancer_vtk(
output_file, points, triangles
)
@@ -78,15 +78,15 @@ def parse_command_line(argv):
try:
matrix = np.fromstring(args.coord_transform, sep=",")
except ValueError as exc:
parser.error("cannot parse --coord-transform: {}"
.format(exc.args[0]))
parser.error(f"cannot parse --coord-transform: {exc.args[0]}"
)
if len(matrix) == 12:
matrix = matrix.reshape(3, 4)
elif len(matrix) == 16:
matrix = matrix.reshape(4, 4)
else:
parser.error("--coord-transform must have 12 or 16 elements"
" ({} passed)".format(len(matrix)))
f" ({len(matrix)} passed)")

args.coord_transform = matrix

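The "wt" to "w" change is safe because open() defaults to text mode: "t" is implied whenever "b" is absent. A short sketch under that assumption (the temporary path is illustrative):

import tempfile

with tempfile.TemporaryDirectory() as tmp:
    with open(tmp + "/out.vtk", "w") as f:  # identical behaviour to mode="wt"
        f.write("# vtk DataFile Version 3.0\n")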
12 changes: 6 additions & 6 deletions experimental/off_to_vtk.py
@@ -19,7 +19,7 @@
def off_mesh_file_to_vtk(input_filename, output_filename, data_format="binary",
coord_transform=None):
"""Convert a mesh file from OFF format to VTK format"""
print("Reading {}".format(input_filename))
print(f"Reading {input_filename}")
with gzip.open(input_filename, "rt") as f:
header_keyword = f.readline().strip()
match = re.match(r"(ST)?(C)?(N)?(4)?(n)?OFF", header_keyword)
@@ -48,8 +48,8 @@ def off_mesh_file_to_vtk(input_filename, output_filename, data_format="binary",
triangles[i, 1] = float(components[2])
triangles[i, 2] = float(components[3])
print()
print("{0} vertices and {1} triangles read"
.format(num_vertices, num_triangles))
print(f"{num_vertices} vertices and {num_triangles} triangles read"
)

points = vertices

@@ -108,15 +108,15 @@ def parse_command_line(argv):
try:
matrix = np.fromstring(args.coord_transform, sep=",")
except ValueError as exc:
parser.error("cannot parse --coord-transform: {}"
.format(exc.args[0]))
parser.error(f"cannot parse --coord-transform: {exc.args[0]}"
)
if len(matrix) == 12:
matrix = matrix.reshape(3, 4)
elif len(matrix) == 16:
matrix = matrix.reshape(4, 4)
else:
parser.error("--coord-transform must have 12 or 16 elements"
" ({} passed)".format(len(matrix)))
f" ({len(matrix)} passed)")

args.coord_transform = matrix

1 change: 1 addition & 0 deletions pyproject.toml
@@ -18,6 +18,7 @@ extend-select = [
"I",
"N",
"NPY",
"UP",
]
ignore = [
"N802", # Gives false positives when a name contains an uppercase acronym
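Adding "UP" to extend-select enables ruff's pyupgrade-derived rules, so outdated idioms keep getting flagged in CI after this one-off cleanup. A sketch of code that the UP032 rule (rewrite str.format as an f-string) would report; the values are illustrative:

n = 13
flagged = "--coord-transform must have 12 or 16 elements ({} passed)".format(n)  # UP032
fixed = f"--coord-transform must have 12 or 16 elements ({n} passed)"
assert flagged == fixed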
14 changes: 7 additions & 7 deletions script_tests/test_scripts.py
@@ -34,8 +34,8 @@ def test_jubrain_example_MPM(examples_dir, tmpdir):
try:
gzip.open(str(input_nifti)).read(348)
except OSError as exc:
pytest.skip("Cannot find a valid example file {0} for testing: {1}"
.format(input_nifti, exc))
pytest.skip(f"Cannot find a valid example file {input_nifti} for "
f"testing: {exc}")

output_dir = tmpdir / "MPM"
assert subprocess.call([
@@ -80,8 +80,8 @@ def test_all_in_one_conversion(examples_dir, tmpdir):
try:
gzip.open(str(input_nifti)).read(348)
except OSError as exc:
pytest.skip("Cannot find a valid example file {0} for testing: {1}"
.format(input_nifti, exc))
pytest.skip(f"Cannot find a valid example file {input_nifti} for "
f"testing: {exc}")

output_dir = tmpdir / "colin27T1_seg"
assert subprocess.call([
Expand All @@ -104,8 +104,8 @@ def test_sharded_conversion(examples_dir, tmpdir):
try:
gzip.open(str(input_nifti)).read(348)
except OSError as exc:
pytest.skip("Cannot find a valid example file {0} for testing: {1}"
.format(input_nifti, exc))
pytest.skip(f"Cannot find a valid example file {input_nifti} for "
f"testing: {exc}")

output_dir = tmpdir / "colin27T1_seg_sharded"
assert subprocess.call([
Expand All @@ -116,7 +116,7 @@ def test_sharded_conversion(examples_dir, tmpdir):
str(output_dir)
], env=env) == 4 # datatype not supported by neuroglancer

with open(output_dir / "info_fullres.json", "r") as fp:
with open(output_dir / "info_fullres.json") as fp:
fullres_info = json.load(fp=fp)
with open(output_dir / "info_fullres.json", "w") as fp:
fullres_info["data_type"] = "uint8"
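Dropping the explicit "r" argument works because it is already open()'s default mode, as the builtin's signature shows. A quick check (illustrative):

import inspect

print(inspect.signature(open))  # (file, mode='r', buffering=-1, encoding=None, ...)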
6 changes: 3 additions & 3 deletions src/neuroglancer_scripts/_compressed_segmentation.py
@@ -104,7 +104,7 @@ def _encode_channel(chunk_channel, block_size):

def _pack_encoded_values(encoded_values, bits):
if bits == 0:
return bytes()
return b""
else:
assert 32 % bits == 0
assert np.array_equal(encoded_values,
@@ -162,8 +162,8 @@ def _decode_channel_into(chunk, channel, buf, block_size):
bits = res[0] >> 24
if bits not in (0, 1, 2, 4, 8, 16, 32):
raise InvalidFormatError("Invalid number of encoding bits for "
"compressed_segmentation block ({0})"
.format(bits))
f"compressed_segmentation block ({bits})"
)
encoded_values_offset = 4 * res[1]
lookup_table_past_end = lookup_table_offset + chunk.itemsize * min(
(2 ** bits),
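bytes() and b"" build the same empty immutable object; the literal is simply the idiomatic spelling. A trivial check, for illustration:

assert bytes() == b"" and len(b"") == 0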
17 changes: 8 additions & 9 deletions src/neuroglancer_scripts/_jpeg.py
@@ -46,17 +46,17 @@ def decode_chunk(buf, chunk_size, num_channels):
img = PIL.Image.open(io_buf)
except Exception as exc:
raise InvalidFormatError(
"The JPEG-encoded chunk could not be decoded: {0}"
.format(exc)) from exc
f"The JPEG-encoded chunk could not be decoded: {exc}"
) from exc

if num_channels == 1 and img.mode != "L":
raise InvalidFormatError(
"The JPEG chunk is encoded with mode={0} instead of L"
.format(img.mode))
f"The JPEG chunk is encoded with mode={img.mode} instead of L"
)
if num_channels == 3 and img.mode != "RGB":
raise InvalidFormatError(
"The JPEG chunk is encoded with mode={0} instead of RGB"
.format(img.mode))
f"The JPEG chunk is encoded with mode={img.mode} instead of RGB"
)

flat_chunk = np.asarray(img)
if num_channels == 3:
Expand All @@ -67,7 +67,6 @@ def decode_chunk(buf, chunk_size, num_channels):
chunk_size[2], chunk_size[1], chunk_size[0])
except Exception:
raise InvalidFormatError("The JPEG-encoded chunk has an incompatible "
"shape ({0} elements, expecting {1})"
.format(flat_chunk.size // num_channels,
np.prod(chunk_size)))
f"shape ({flat_chunk.size // num_channels} "
f"elements, expecting {np.prod(chunk_size)})")
return chunk
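These multi-line messages rely on implicit concatenation of adjacent string literals: only the pieces that contain placeholders need the f prefix. An illustrative sketch of the pattern:

mode = "L"
msg = ("The JPEG chunk is encoded with "
       f"mode={mode} instead of RGB")
assert msg == "The JPEG chunk is encoded with mode=L instead of RGB"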
4 changes: 2 additions & 2 deletions src/neuroglancer_scripts/accessor.py
@@ -88,8 +88,8 @@ def get_accessor_for_url(url, accessor_options={}):
return sharded_http_accessor.ShardedHttpAccessor(url)
return accessor
else:
raise URLError("Unsupported URL scheme {0} (must be file, http, or "
"https)".format(r.scheme))
raise URLError(f"Unsupported URL scheme {r.scheme} (must be file, "
"http, or https)")


def add_argparse_options(parser, write_chunks=True, write_files=True):
18 changes: 9 additions & 9 deletions src/neuroglancer_scripts/chunk_encoding.py
@@ -48,14 +48,14 @@ def get_encoder(info, scale_info, encoder_options={}):
num_channels = info["num_channels"]
encoding = scale_info["encoding"]
except KeyError as exc:
raise InvalidInfoError("The info dict is missing an essential key {0}"
.format(exc)) from exc
raise InvalidInfoError("The info dict is missing an essential key "
f"{exc}") from exc
if not isinstance(num_channels, int) or not num_channels > 0:
raise InvalidInfoError("Invalid value {0} for num_channels (must be "
"a positive integer)".format(num_channels))
raise InvalidInfoError(f"Invalid value {num_channels} for "
"num_channels (must be a positive integer)")
if data_type not in NEUROGLANCER_DATA_TYPES:
raise InvalidInfoError("Invalid data_type {0} (should be one of {1})"
.format(data_type, NEUROGLANCER_DATA_TYPES))
raise InvalidInfoError(f"Invalid data_type {data_type} (should be one "
f"of {NEUROGLANCER_DATA_TYPES})")
try:
if encoding == "raw":
return RawChunkEncoder(data_type, num_channels)
Expand All @@ -75,7 +75,7 @@ def get_encoder(info, scale_info, encoder_options={}):
jpeg_plane=jpeg_plane,
jpeg_quality=jpeg_quality)
else:
raise InvalidInfoError("Invalid encoding {0}".format(encoding))
raise InvalidInfoError(f"Invalid encoding {encoding}")
except IncompatibleEncoderError as exc:
raise InvalidInfoError(str(exc)) from exc

@@ -191,8 +191,8 @@ def decode(self, buf, chunk_size):
(self.num_channels,
chunk_size[2], chunk_size[1], chunk_size[0]))
except Exception as exc:
raise InvalidFormatError("Cannot decode raw-encoded chunk: {0}"
.format(exc)) from exc
raise InvalidFormatError(f"Cannot decode raw-encoded chunk: {exc}"
) from exc


class CompressedSegmentationEncoder(ChunkEncoder):
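Interpolating a caught KeyError with {exc} reads well because str() of a KeyError is the repr of the missing key. A small demonstration with an illustrative dict:

info = {}
try:
    info["data_type"]
except KeyError as exc:
    assert f"missing an essential key {exc}" == "missing an essential key 'data_type'"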
2 changes: 1 addition & 1 deletion src/neuroglancer_scripts/data_types.py
@@ -87,7 +87,7 @@ def get_dtype(input_dtype):
if input_dtype.names is None:
return input_dtype, False
if input_dtype.names not in NG_MULTICHANNEL_DATATYPES:
err = 'tuple datatype {} not yet supported'.format(input_dtype.names)
err = f'tuple datatype {input_dtype.names} not yet supported'
raise NotImplementedError(err)
for index, value in enumerate(input_dtype.names):
err = 'Multichanneled datatype should have the same datatype'
4 changes: 2 additions & 2 deletions src/neuroglancer_scripts/downscaling.py
@@ -48,8 +48,8 @@ def get_downscaler(downscaling_method, info=None, options={}):
elif downscaling_method == "stride":
return StridingDownscaler()
else:
raise NotImplementedError("invalid downscaling method {0}"
.format(downscaling_method))
raise NotImplementedError("invalid downscaling method "
+ downscaling_method)


def add_argparse_options(parser):
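Note that this hunk settles on plain concatenation instead of an f-string; the two spellings build identical messages (values illustrative):

downscaling_method = "majority"  # illustrative method name
assert ("invalid downscaling method " + downscaling_method
        == f"invalid downscaling method {downscaling_method}")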
12 changes: 6 additions & 6 deletions src/neuroglancer_scripts/dyadic_pyramid.py
@@ -30,8 +30,8 @@ def choose_unit_for_key(resolution_nm):
and (format_length(resolution_nm, unit)
!= format_length(resolution_nm * 2, unit))):
return unit
raise NotImplementedError("cannot find a suitable unit for {} nm"
.format(resolution_nm))
raise NotImplementedError("cannot find a suitable unit for "
f"{resolution_nm} nm")


def fill_scales_for_dyadic_pyramid(info, target_chunk_size=64,
@@ -175,15 +175,15 @@ def compute_dyadic_downscaling(info, source_scale_index, downscaler,
if new_size != [ceil_div(os, ds)
for os, ds in zip(old_size, downscaling_factors)]:
raise ValueError("Unsupported downscaling factor between scales "
"{} and {} (only 1 and 2 are supported)"
.format(old_key, new_key))
f"{old_key} and {new_key} "
"(only 1 and 2 are supported)")

downscaler.check_factors(downscaling_factors)

if chunk_reader.scale_is_lossy(old_key):
logger.warning(
"Using data stored in a lossy format (scale %s) as an input "
"for downscaling (to scale %s)" % (old_key, new_key)
"for downscaling (to scale %s)", old_key, new_key
)

half_chunk = [osz // f
Expand All @@ -210,7 +210,7 @@ def load_and_downscale_old_chunk(z_idx, y_idx, x_idx):
# TODO how to do progress report correctly with logging?
for x_idx, y_idx, z_idx in tqdm(
np.ndindex(chunk_range), total=np.prod(chunk_range),
desc="computing scale {}".format(new_key),
desc=f"computing scale {new_key}",
unit="chunks", leave=True):
xmin = new_chunk_size[0] * x_idx
xmax = min(new_chunk_size[0] * (x_idx + 1), new_size[0])
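The logging change is more than cosmetic: passing old_key and new_key as arguments defers %-interpolation to the logging framework, which skips the formatting when the record is filtered out and keeps the raw values available to handlers. A hedged sketch of the pattern:

import logging

logger = logging.getLogger("demo")
old_key, new_key = "20um", "40um"  # illustrative scale keys
logger.warning(
    "Using data stored in a lossy format (scale %s) as an input "
    "for downscaling (to scale %s)", old_key, new_key
)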
30 changes: 15 additions & 15 deletions src/neuroglancer_scripts/file_accessor.py
@@ -63,7 +63,7 @@ def file_exists(self, relative_path):
return True
except OSError as exc:
raise DataAccessError(
"Error fetching {0}: {1}".format(file_path, exc)) from exc
f"Error fetching {file_path}: {exc}") from exc
return False

def fetch_file(self, relative_path):
Expand All @@ -79,13 +79,13 @@ def fetch_file(self, relative_path):
f = gzip.open(str(file_path.with_name(file_path.name + ".gz")),
"rb")
else:
raise DataAccessError("Cannot find {0} in {1}".format(
relative_path, self.base_path))
raise DataAccessError(f"Cannot find {relative_path} in "
f"{self.base_path}")
with f:
return f.read()
except OSError as exc:
raise DataAccessError(
"Error fetching {0}: {1}".format(file_path, exc)) from exc
f"Error fetching {file_path}: {exc}") from exc

def store_file(self, relative_path, buf,
mime_type="application/octet-stream",
Expand All @@ -107,8 +107,8 @@ def store_file(self, relative_path, buf,
with file_path.open(mode) as f:
f.write(buf)
except OSError as exc:
raise DataAccessError("Error storing {0}: {1}"
.format(file_path, exc)) from exc
raise DataAccessError(f"Error storing {file_path}: {exc}"
) from exc

def fetch_chunk(self, key, chunk_coords):
f = None
Expand All @@ -124,17 +124,17 @@ def fetch_chunk(self, key, chunk_coords):
)
if f is None:
raise DataAccessError(
"Cannot find chunk {0} in {1}" .format(
self._flat_chunk_basename(key, chunk_coords),
self.base_path)
"Cannot find chunk "
f"{self._flat_chunk_basename(key, chunk_coords)} in "
f"{self.base_path}"
)
with f:
return f.read()
except OSError as exc:
raise DataAccessError(
"Error accessing chunk {0} in {1}: {2}" .format(
self._flat_chunk_basename(key, chunk_coords),
self.base_path, exc)) from exc
"Error accessing chunk "
f"{self._flat_chunk_basename(key, chunk_coords)} in "
f"{self.base_path}: {exc}" ) from exc

def store_chunk(self, buf, key, chunk_coords,
mime_type="application/octet-stream",
Expand All @@ -153,9 +153,9 @@ def store_chunk(self, buf, key, chunk_coords,
f.write(buf)
except OSError as exc:
raise DataAccessError(
"Error storing chunk {0} in {1}: {2}" .format(
self._flat_chunk_basename(key, chunk_coords),
self.base_path, exc)) from exc
"Error storing chunk "
f"{self._flat_chunk_basename(key, chunk_coords)} in "
f"{self.base_path}: {exc}" ) from exc

def _chunk_path(self, key, chunk_coords, pattern=None):
if pattern is None:
7 changes: 3 additions & 4 deletions src/neuroglancer_scripts/http_accessor.py
@@ -61,8 +61,8 @@ def file_exists(self, relative_path):
return False
r.raise_for_status()
except requests.exceptions.RequestException as exc:
raise DataAccessError("Error probing the existence of {0}: {1}"
.format(file_url, exc)) from exc
raise DataAccessError("Error probing the existence of "
f"{file_url}: {exc}") from exc
return True

def fetch_file(self, relative_path):
Expand All @@ -71,6 +71,5 @@ def fetch_file(self, relative_path):
r = self._session.get(file_url)
r.raise_for_status()
except requests.exceptions.RequestException as exc:
raise DataAccessError("Error reading {0}: {1}"
.format(file_url, exc)) from exc
raise DataAccessError(f"Error reading {file_url}: {exc}") from exc
return r.content