Fix some deprecation warnings from tests #469

Merged · 4 commits · Dec 4, 2023

Changes from all commits
4 changes: 2 additions & 2 deletions extra_data/components.py
@@ -827,7 +827,7 @@ def dask_array(self, *, labelled=False, fill_value=None, astype=None):
         from dask.array import concatenate, from_delayed
 
         entry_size = (self.dtype.itemsize *
-                      len(self.modno_to_keydata) * np.product(self._eg_keydata.entry_shape)
+                      len(self.modno_to_keydata) * np.prod(self._eg_keydata.entry_shape)
                       )
         # Aim for 1GB chunks, with an arbitrary maximum of 256 trains
         split = self.split_trains(frames_per_part=min(1024 ** 3 / entry_size, 256))

@@ -1035,7 +1035,7 @@ def dask_array(self, *, labelled=False, subtrain_index='pulseId',
         from dask.array import concatenate, from_delayed
 
         entry_size = (self.dtype.itemsize *
-                      len(self.modno_to_keydata) * np.product(self._eg_keydata.entry_shape)
+                      len(self.modno_to_keydata) * np.prod(self._eg_keydata.entry_shape)
                       )
         if frames_per_chunk is None:
             # Aim for 2GB chunks, with an arbitrary maximum of 1024 frames
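For context: np.product was a legacy alias of np.prod, deprecated in NumPy 1.25 and removed in NumPy 2.0, so the rename is a drop-in replacement. A minimal sketch of the entry-size arithmetic above, using an illustrative shape and dtype rather than the library's real values:

    import numpy as np

    # np.prod multiplies the elements of its argument; applied to a shape
    # tuple it gives the number of values per entry, just as np.product did.
    entry_shape = (512, 1024)            # hypothetical per-module frame shape
    n_modules = 8                        # hypothetical module count
    itemsize = np.dtype('u2').itemsize   # 2 bytes
    entry_size = itemsize * n_modules * np.prod(entry_shape)
    print(entry_size)                    # 8388608 bytes per train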
2 changes: 1 addition & 1 deletion extra_data/keydata.py
@@ -481,7 +481,7 @@ def dask_array(self, labelled=False):
         # affect speed dramatically - but this could depend on many factors.
         # TODO: optional user control of chunking
         limit = 2 * 1024 ** 3
-        while np.product(chunk_shape) * itemsize > limit and chunk_dim0 > 1:
+        while np.prod(chunk_shape) * itemsize > limit and chunk_dim0 > 1:
             chunk_dim0 //= 2
             chunk_shape = (chunk_dim0,) + chunk.dataset.shape[1:]
 
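The loop touched here halves the leading chunk dimension until a single chunk fits under the 2 GiB limit. A standalone sketch of that strategy, with illustrative names rather than the library's internals:

    import numpy as np

    def shrink_chunks(shape, itemsize, limit=2 * 1024 ** 3):
        # Halve the leading dimension until one chunk is at most `limit`
        # bytes, mirroring the while-loop in the diff above.
        chunk_dim0 = shape[0]
        chunk_shape = shape
        while np.prod(chunk_shape) * itemsize > limit and chunk_dim0 > 1:
            chunk_dim0 //= 2
            chunk_shape = (chunk_dim0,) + shape[1:]
        return chunk_shape

    # 10000 uint16 JUNGFRAU-sized frames: the leading dim shrinks to 78
    print(shrink_chunks((10000, 16, 512, 1024), itemsize=2))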
2 changes: 1 addition & 1 deletion extra_data/tests/mockdata/base.py
@@ -162,7 +162,7 @@ def write_metadata(h5file, data_sources, chunksize=16, format_version='0.5'):
 
     if format_version != '0.5':
         h5file['METADATA/dataFormatVersion'] = [format_version.encode('ascii')]
-        now = datetime.utcnow().replace(microsecond=0)
+        now = datetime.now(timezone.utc).replace(microsecond=0)
         updated_time = now + timedelta(minutes=5)
         h5file['METADATA/creationDate'] = [
             now.strftime('%Y%m%dT%H%M%SZ').encode('ascii')
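For background: datetime.utcnow() is deprecated since Python 3.12 because it returns a naive datetime with no tzinfo attached; datetime.now(timezone.utc) is the timezone-aware replacement (and requires timezone to be imported alongside datetime and timedelta). A minimal sketch of the pattern used above:

    from datetime import datetime, timezone

    # Aware UTC timestamp, truncated to whole seconds as in the mock writer.
    now = datetime.now(timezone.utc).replace(microsecond=0)
    print(now.strftime('%Y%m%dT%H%M%SZ'))   # e.g. '20231204T120000Z'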
25 changes: 17 additions & 8 deletions extra_data/tests/mockdata/jungfrau.py
@@ -3,14 +3,23 @@
 class JUNGFRAUModule(DeviceBase):
     output_channels = ('daqOutput/data',)
 
-    instrument_keys = [
-        ('adc', 'u2', (16, 512, 1024)),
-        ('frameNumber', 'u8', (16,)),
-        ('gain', 'u1', (16, 512, 1024)),
-        ('mask', 'u2', (16, 512, 1024)),
-        ('memoryCell', 'u1', (16,)),
-        ('timestamp', 'f8', (16,)),
-    ]
+    def __init__(self, device_id, nsamples=None, raw=False):
+        super().__init__(device_id, nsamples)
+        self.raw = raw
+
+    @property
+    def instrument_keys(self):
+        return [
+            ('frameNumber', 'u8', (16,)),
+            ('gain', 'u1', (16, 512, 1024)),
+            ('memoryCell', 'u1', (16,)),
+            ('timestamp', 'f8', (16,)),
+        ] + ([
+            ('adc', 'u2', (16, 512, 1024)),
+        ] if self.raw else [
+            ('adc', 'f4', (16, 512, 1024)),
+            ('mask', 'u4', (16, 512, 1024)),
+        ])
 
 class JUNGFRAUControl(DeviceBase):
     control_keys = [
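With this change the mock JUNGFRAU module models both raw and processed (corrected) data: the raw variant keeps the uint16 adc, while the processed variant carries a float32 adc plus a uint32 mask. A hypothetical check of the two modes, assuming the class is imported straight from the mock package:

    from extra_data.tests.mockdata.jungfrau import JUNGFRAUModule

    raw_mod = JUNGFRAUModule('SPB_IRDA_JF4M/DET/JNGFR01', raw=True)
    proc_mod = JUNGFRAUModule('SPB_IRDA_JF4M/DET/JNGFR01', raw=False)

    raw_dtypes = {name: dt for name, dt, _ in raw_mod.instrument_keys}
    proc_dtypes = {name: dt for name, dt, _ in proc_mod.instrument_keys}
    print(raw_dtypes['adc'])     # 'u2' - raw detector counts
    print(proc_dtypes['adc'])    # 'f4' - calibrated values
    print('mask' in raw_dtypes)  # False: the mask key only exists when processed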
2 changes: 1 addition & 1 deletion extra_data/tests/test_reader_mockdata.py
@@ -1042,7 +1042,7 @@ def test_inspect_key_no_trains(mock_jungfrau_run):
     # INSTRUMENT
     jf_m1_data = sel['SPB_IRDA_JF4M/DET/JNGFR01:daqOutput', 'data.adc']
     assert jf_m1_data.shape == (0, 16, 512, 1024)
-    assert jf_m1_data.dtype == np.dtype(np.uint16)
+    assert jf_m1_data.dtype == np.dtype(np.float32)
 
 
 def test_run_metadata(mock_spb_raw_run):
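The expected dtype changes because the mock_jungfrau_run fixture evidently uses the processed variant of the mock module, whose data.adc is 'f4' per the change above. As a reminder of why the assertion style works, np.dtype objects compare equal to type objects and to dtype names alike; a minimal check:

    import numpy as np

    arr = np.zeros((0, 16, 512, 1024), dtype='f4')  # empty, like the no-trains selection
    assert arr.dtype == np.dtype(np.float32)
    assert arr.dtype == 'float32'   # a dtype also compares equal to its name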