diff --git a/extra_data/components.py b/extra_data/components.py
index b75d3207..a6dae700 100644
--- a/extra_data/components.py
+++ b/extra_data/components.py
@@ -827,7 +827,7 @@ def dask_array(self, *, labelled=False, fill_value=None, astype=None):
         from dask.array import concatenate, from_delayed
 
         entry_size = (self.dtype.itemsize *
-            len(self.modno_to_keydata) * np.product(self._eg_keydata.entry_shape)
+            len(self.modno_to_keydata) * np.prod(self._eg_keydata.entry_shape)
         )
         # Aim for 1GB chunks, with an arbitrary maximum of 256 trains
         split = self.split_trains(frames_per_part=min(1024 ** 3 / entry_size, 256))
@@ -1035,7 +1035,7 @@ def dask_array(self, *, labelled=False, subtrain_index='pulseId',
         from dask.array import concatenate, from_delayed
 
         entry_size = (self.dtype.itemsize *
-            len(self.modno_to_keydata) * np.product(self._eg_keydata.entry_shape)
+            len(self.modno_to_keydata) * np.prod(self._eg_keydata.entry_shape)
         )
         if frames_per_chunk is None:
             # Aim for 2GB chunks, with an arbitrary maximum of 1024 frames
diff --git a/extra_data/keydata.py b/extra_data/keydata.py
index 3e4430e1..b9c9c50d 100644
--- a/extra_data/keydata.py
+++ b/extra_data/keydata.py
@@ -481,7 +481,7 @@ def dask_array(self, labelled=False):
             # affect speed dramatically - but this could depend on many factors.
             # TODO: optional user control of chunking
             limit = 2 * 1024 ** 3
-            while np.product(chunk_shape) * itemsize > limit and chunk_dim0 > 1:
+            while np.prod(chunk_shape) * itemsize > limit and chunk_dim0 > 1:
                 chunk_dim0 //= 2
                 chunk_shape = (chunk_dim0,) + chunk.dataset.shape[1:]
 
diff --git a/extra_data/tests/mockdata/base.py b/extra_data/tests/mockdata/base.py
index 54f032b6..2c4284aa 100644
--- a/extra_data/tests/mockdata/base.py
+++ b/extra_data/tests/mockdata/base.py
@@ -162,7 +162,7 @@ def write_metadata(h5file, data_sources, chunksize=16, format_version='0.5'):
     if format_version != '0.5':
         h5file['METADATA/dataFormatVersion'] = [format_version.encode('ascii')]
 
-    now = datetime.utcnow().replace(microsecond=0)
+    now = datetime.now(timezone.utc).replace(microsecond=0)
     updated_time = now + timedelta(minutes=5)
     h5file['METADATA/creationDate'] = [
         now.strftime('%Y%m%dT%H%M%SZ').encode('ascii')
diff --git a/extra_data/tests/mockdata/jungfrau.py b/extra_data/tests/mockdata/jungfrau.py
index 2d004b56..aebeaa93 100644
--- a/extra_data/tests/mockdata/jungfrau.py
+++ b/extra_data/tests/mockdata/jungfrau.py
@@ -3,14 +3,23 @@
 class JUNGFRAUModule(DeviceBase):
     output_channels = ('daqOutput/data',)
 
-    instrument_keys = [
-        ('adc', 'u2', (16, 512, 1024)),
-        ('frameNumber', 'u8', (16,)),
-        ('gain', 'u1', (16, 512, 1024)),
-        ('mask', 'u2', (16, 512, 1024)),
-        ('memoryCell', 'u1', (16,)),
-        ('timestamp', 'f8', (16,)),
-    ]
+    def __init__(self, device_id, nsamples=None, raw=False):
+        super().__init__(device_id, nsamples)
+        self.raw = raw
+
+    @property
+    def instrument_keys(self):
+        return [
+            ('frameNumber', 'u8', (16,)),
+            ('gain', 'u1', (16, 512, 1024)),
+            ('memoryCell', 'u1', (16,)),
+            ('timestamp', 'f8', (16,)),
+        ] + ([
+            ('adc', 'u2', (16, 512, 1024)),
+        ] if self.raw else [
+            ('adc', 'f4', (16, 512, 1024)),
+            ('mask', 'u4', (16, 512, 1024)),
+        ])
 
 class JUNGFRAUControl(DeviceBase):
     control_keys = [
diff --git a/extra_data/tests/test_reader_mockdata.py b/extra_data/tests/test_reader_mockdata.py
index 846808ef..a857af50 100644
--- a/extra_data/tests/test_reader_mockdata.py
+++ b/extra_data/tests/test_reader_mockdata.py
@@ -1042,7 +1042,7 @@ def test_inspect_key_no_trains(mock_jungfrau_run):
     # INSTRUMENT
     jf_m1_data = sel['SPB_IRDA_JF4M/DET/JNGFR01:daqOutput', 'data.adc']
     assert jf_m1_data.shape == (0, 16, 512, 1024)
-    assert jf_m1_data.dtype == np.dtype(np.uint16)
+    assert jf_m1_data.dtype == np.dtype(np.float32)
 
 
 def test_run_metadata(mock_spb_raw_run):
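
For reference, np.product was a deprecated alias of np.prod and was removed in NumPy 2.0, and naive datetime.utcnow() has been deprecated since Python 3.12 in favour of the timezone-aware call used above. A minimal standalone sketch of both replacements follows; the variable names and the 'f4' dtype here are illustrative, not taken from the codebase:

    from datetime import datetime, timezone

    import numpy as np

    # Entry size in bytes for one frame stack, mirroring the dask_array()
    # chunking arithmetic above: product of the shape times the item size.
    entry_shape = (16, 512, 1024)
    entry_size = np.dtype('f4').itemsize * np.prod(entry_shape)  # 33554432 bytes

    # Timezone-aware "now" in UTC; strftime behaves as it did with utcnow().
    now = datetime.now(timezone.utc).replace(microsecond=0)
    print(entry_size, now.strftime('%Y%m%dT%H%M%SZ'))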