Skip to content

Commit

Permalink
Refactor string formatting to use f-strings (#128)
Browse files Browse the repository at this point in the history
Python f-strings are the preferred approach to formatting strings and have been supported since Python 3.6. Replace the remaining old-style string formatting with f-string style.

Additionally, refactor certain error messages to be consistent with others.
  • Loading branch information
addisonElliott authored Sep 20, 2022
1 parent f4b1ba1 commit c3c0d24
Show file tree
Hide file tree
Showing 4 changed files with 26 additions and 28 deletions.
26 changes: 12 additions & 14 deletions nrrd/reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ def _parse_field_value(value, field_type):
# for none rows. NaN is only valid for floating point numbers
return parse_optional_matrix(value)
else:
raise NRRDError('Invalid field type given: %s' % field_type)
raise NRRDError(f'Invalid field type given: {field_type}')


def _determine_datatype(fields):
Expand All @@ -162,7 +162,7 @@ def _determine_datatype(fields):
elif fields['endian'] == 'little':
np_typestring = '<' + np_typestring
else:
raise NRRDError('Invalid endian value in header: "%s"' % fields['endian'])
raise NRRDError(f'Invalid endian value in header: {fields["endian"]}')

return np.dtype(np_typestring)

Expand All @@ -189,10 +189,10 @@ def _validate_magic_line(line):
try:
version = int(line[4:])
if version > 5:
raise NRRDError('Unsupported NRRD file version (version: %i). This library only supports v%i and below.'
% (version, 5))
raise NRRDError(f'Unsupported NRRD file version (version: {version}). This library only supports v5 '
'and below.')
except ValueError:
raise NRRDError('Invalid NRRD magic line: %s' % line)
raise NRRDError(f'Invalid NRRD magic line: {line}')

return len(line)

Expand Down Expand Up @@ -279,12 +279,10 @@ def read_header(file, custom_field_map=None):

# Check if the field has been added already
if field in header.keys():
dup_message = "Duplicate header field: '%s'" % str(field)

if not ALLOW_DUPLICATE_FIELD:
raise NRRDError(dup_message)

warnings.warn(dup_message)
raise NRRDError(f'Duplicate header field: {field}')
else:
warnings.warn(f'Duplicate header field: {field}')

# Get the datatype of the field based on it's field name and custom field map
field_type = _get_field_type(field, custom_field_map)
Expand Down Expand Up @@ -341,11 +339,11 @@ def read_data(header, fh=None, filename=None, index_order='F'):
# Check that the required fields are in the header
for field in _NRRD_REQUIRED_FIELDS:
if field not in header:
raise NRRDError('Header is missing required field: "%s".' % field)
raise NRRDError(f'Header is missing required field: {field}')

if header['dimension'] != len(header['sizes']):
raise NRRDError('Number of elements in sizes does not match dimension. Dimension: %i, len(sizes): %i' % (
header['dimension'], len(header['sizes'])))
raise NRRDError(f'Number of elements in sizes does not match dimension. Dimension: {header["dimension"]}, '
f'len(sizes): {len(header["sizes"])}')

# Determine the data type from the header
dtype = _determine_datatype(header)
Expand Down Expand Up @@ -424,7 +422,7 @@ def read_data(header, fh=None, filename=None, index_order='F'):
# to close it for us
fh.close()

raise NRRDError('Unsupported encoding: "%s"' % header['encoding'])
raise NRRDError(f'Unsupported encoding: {header["encoding"]}')

# Loop through the file and read a chunk at a time (see _READ_CHUNKSIZE why it is read in chunks)
decompressed_data = bytearray()
Expand Down
10 changes: 5 additions & 5 deletions nrrd/tests/test_reading.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,15 +178,15 @@ def test_read_dup_field_error_and_warn(self):
expected_header = {'type': 'float', 'dimension': 3}
header_txt_tuple = ('NRRD0005', 'type: float', 'dimension: 3', 'type: float')

with self.assertRaisesRegex(nrrd.NRRDError, "Duplicate header field: 'type'"):
with self.assertRaisesRegex(nrrd.NRRDError, "Duplicate header field: type"):
header = nrrd.read_header(header_txt_tuple)

import warnings
with warnings.catch_warnings(record=True) as w:
nrrd.reader.ALLOW_DUPLICATE_FIELD = True
header = nrrd.read_header(header_txt_tuple)

self.assertTrue("Duplicate header field: 'type'" in str(w[0].message))
self.assertTrue("Duplicate header field: type" in str(w[0].message))

self.assertEqual(expected_header, header)
nrrd.reader.ALLOW_DUPLICATE_FIELD = False
Expand Down Expand Up @@ -334,7 +334,7 @@ def test_missing_required_field(self):
# Delete required field
del header['type']

with self.assertRaisesRegex(nrrd.NRRDError, 'Header is missing required field: "type".'):
with self.assertRaisesRegex(nrrd.NRRDError, 'Header is missing required field: type'):
nrrd.read_data(header, fh, RAW_NRRD_FILE_PATH)

def test_wrong_sizes(self):
Expand All @@ -357,7 +357,7 @@ def test_invalid_encoding(self):
# Set the encoding to be incorrect
header['encoding'] = 'fake'

with self.assertRaisesRegex(nrrd.NRRDError, 'Unsupported encoding: "fake"'):
with self.assertRaisesRegex(nrrd.NRRDError, 'Unsupported encoding: fake'):
nrrd.read_data(header, fh, RAW_NRRD_FILE_PATH)

def test_detached_header_no_filename(self):
Expand Down Expand Up @@ -417,7 +417,7 @@ def test_invalid_endian(self):
# Set endianness to fake value
header['endian'] = 'fake'

with self.assertRaisesRegex(nrrd.NRRDError, 'Invalid endian value in header: "fake"'):
with self.assertRaisesRegex(nrrd.NRRDError, 'Invalid endian value in header: fake'):
nrrd.read_data(header, fh, RAW_NRRD_FILE_PATH)

def test_invalid_index_order(self):
Expand Down
2 changes: 1 addition & 1 deletion nrrd/tests/test_writing.py
Original file line number Diff line number Diff line change
Expand Up @@ -303,7 +303,7 @@ def test_unsupported_encoding(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_unsupported_encoding.nrrd')
header = {'encoding': 'fake'}

with self.assertRaisesRegex(nrrd.NRRDError, 'Unsupported encoding: "fake"'):
with self.assertRaisesRegex(nrrd.NRRDError, 'Unsupported encoding: fake'):
nrrd.write(output_filename, np.zeros((3, 9)), header, index_order=self.index_order)

def test_invalid_index_order(self):
Expand Down
16 changes: 8 additions & 8 deletions nrrd/writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ def _format_field_value(value, field_type):
elif field_type == 'double matrix':
return format_optional_matrix(value)
else:
raise NRRDError('Invalid field type given: %s' % field_type)
raise NRRDError(f'Invalid field type given: {field_type}')


def _handle_header(data, header=None, index_order='F'):
Expand Down Expand Up @@ -220,7 +220,7 @@ def _write_data(data, fh, header, compression_level=None, index_order='F'):
elif header['encoding'] in ['bzip2', 'bz2']:
compressobj = bz2.BZ2Compressor(compression_level)
else:
raise NRRDError('Unsupported encoding: "%s"' % header['encoding'])
raise NRRDError(f'Unsupported encoding: {header["encoding"]}')

# Write the data in chunks (see _WRITE_CHUNKSIZE declaration for more information why)
# Obtain the length of the data since we will be using it repeatedly, more efficient
Expand Down Expand Up @@ -333,15 +333,15 @@ def write(file, data, header=None, detached_header=False, relative_data_path=Tru
# Get the appropriate data filename based on encoding, see here for information on the standard detached
# filename: http://teem.sourceforge.net/nrrd/format.html#encoding
if header['encoding'] == 'raw':
data_filename = '%s.raw' % base_filename
data_filename = f'{base_filename}.raw'
elif header['encoding'] in ['ASCII', 'ascii', 'text', 'txt']:
data_filename = '%s.txt' % base_filename
data_filename = f'{base_filename}.txt'
elif header['encoding'] in ['gzip', 'gz']:
data_filename = '%s.raw.gz' % base_filename
data_filename = f'{base_filename}.raw.gz'
elif header['encoding'] in ['bzip2', 'bz2']:
data_filename = '%s.raw.bz2' % base_filename
data_filename = f'{base_filename}.raw.bz2'
else:
raise NRRDError('Invalid encoding specification while writing NRRD file: %s' % header['encoding'])
raise NRRDError(f'Invalid encoding specification while writing NRRD file: {header["encoding"]}')

# Update the data file field in the header with the path of the detached data
# TODO This will cause problems when the user specifies a relative data path and gives a custom path OUTSIDE
Expand All @@ -351,7 +351,7 @@ def write(file, data, header=None, detached_header=False, relative_data_path=Tru
detached_header = True
elif file.endswith('.nrrd') and detached_header:
data_filename = file
file = '%s.nhdr' % os.path.splitext(file)[0]
file = f'{os.path.splitext(file)[0]}.nhdr'
header['data file'] = os.path.basename(data_filename) \
if relative_data_path else os.path.abspath(data_filename)
else:
Expand Down

0 comments on commit c3c0d24

Please sign in to comment.