diff --git a/brainbox/io/one.py b/brainbox/io/one.py
index ed0b15177..baa07a3e4 100644
--- a/brainbox/io/one.py
+++ b/brainbox/io/one.py
@@ -334,7 +334,6 @@ def _load_channel_locations_traj(eid, probe=None, one=None, revision=None, align
         # get the channels from histology tracing
         xyz = xyz[np.argsort(xyz[:, 2]), :]
         chans = histology.interpolate_along_track(xyz, (depths + TIP_SIZE_UM) / 1e6)
-        channels[probe] = _channels_traj2bunch(chans, brain_atlas)
         source = 'traced'
     channels[probe]['axial_um'] = chn_coords[:, 1]
@@ -894,6 +893,7 @@ class SpikeSortingLoader:
     collection: str = ''
     histology: str = ''  # 'alf', 'resolved', 'aligned' or 'traced'
     spike_sorting_path: Path = None
+    _sync: dict = None

    def __post_init__(self):
        # pid gets precedence
@@ -1039,3 +1039,20 @@ def url(self):
         """Gets flatiron URL for the session"""
         webclient = getattr(self.one, '_web_client', None)
         return webclient.rel_path2url(get_alf_path(self.session_path)) if webclient else None
+
+    def samples2times(self, values, direction='forward'):
+        """
+        :param values: numpy array of times in seconds or samples to resync
+        :param direction: 'forward' (samples probe time to seconds main time) or 'reverse'
+         (seconds main time to samples probe time)
+        :return: numpy array of resynchronised times or samples
+        """
+        if self._sync is None:
+            timestamps = self.one.load_dataset(
+                self.eid, dataset='_spikeglx_*.timestamps.npy', collection=f'raw_ephys_data/{self.pname}')
+            self._sync = {
+                'timestamps': timestamps,
+                'forward': interp1d(timestamps[:, 0], timestamps[:, 1], fill_value='extrapolate'),
+                'reverse': interp1d(timestamps[:, 1], timestamps[:, 0], fill_value='extrapolate'),
+            }
+        return self._sync[direction](values)
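For context, a minimal sketch of how the new `samples2times` method might be used to move between the probe sample clock and the main session clock (the pid and sample values below are hypothetical):

```python
from one.api import ONE
from brainbox.io.one import SpikeSortingLoader

one = ONE()
# hypothetical probe insertion id
ssl = SpikeSortingLoader(pid='decc8d40-cf74-4263-ae9d-a0cc68b47e86', one=one)

# forward: sample indices on the probe clock -> seconds on the main clock;
# the first call lazily loads and caches the _spikeglx_*.timestamps.npy dataset
t_main = ssl.samples2times([0, 30_000, 60_000], direction='forward')

# reverse: seconds on the main clock -> sample indices on the probe clock
samples = ssl.samples2times(t_main, direction='reverse')
```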
diff --git a/ibllib/__init__.py b/ibllib/__init__.py
index ebb18822e..edd308d56 100644
--- a/ibllib/__init__.py
+++ b/ibllib/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.12.2"
+__version__ = "2.13.0"
 import warnings
 from ibllib.misc import logger_config
diff --git a/ibllib/atlas/atlas.py b/ibllib/atlas/atlas.py
index ea12b290e..b675bf8dd 100644
--- a/ibllib/atlas/atlas.py
+++ b/ibllib/atlas/atlas.py
@@ -110,22 +110,55 @@ def _round(i, round=True):
         else:
             return i

-    def x2i(self, x, round=True):
-        return self._round((x - self.x0) / self.dx, round=round)
-
-    def y2i(self, y, round=True):
-        return self._round((y - self.y0) / self.dy, round=round)
-
-    def z2i(self, z, round=True):
-        return self._round((z - self.z0) / self.dz, round=round)
+    def x2i(self, x, round=True, mode='raise'):
+        i = np.asarray(self._round((x - self.x0) / self.dx, round=round))
+        if np.any(i < 0) or np.any(i >= self.nx):
+            if mode == 'clip':
+                i[i < 0] = 0
+                i[i >= self.nx] = self.nx - 1
+            elif mode == 'raise':
+                raise ValueError("At least one x value lies outside of the atlas volume.")
+            elif mode == 'wrap':
+                pass
+        return i
+
+    def y2i(self, y, round=True, mode='raise'):
+        i = np.asarray(self._round((y - self.y0) / self.dy, round=round))
+        if np.any(i < 0) or np.any(i >= self.ny):
+            if mode == 'clip':
+                i[i < 0] = 0
+                i[i >= self.ny] = self.ny - 1
+            elif mode == 'raise':
+                raise ValueError("At least one y value lies outside of the atlas volume.")
+            elif mode == 'wrap':
+                pass
+        return i
+
+    def z2i(self, z, round=True, mode='raise'):
+        i = np.asarray(self._round((z - self.z0) / self.dz, round=round))
+        if np.any(i < 0) or np.any(i >= self.nz):
+            if mode == 'clip':
+                i[i < 0] = 0
+                i[i >= self.nz] = self.nz - 1
+            elif mode == 'raise':
+                raise ValueError("At least one z value lies outside of the atlas volume.")
+            elif mode == 'wrap':
+                pass
+        return i

-    def xyz2i(self, xyz, round=True):
+    def xyz2i(self, xyz, round=True, mode='raise'):
+        """
+        :param mode: {'raise', 'clip', 'wrap'} determines what to do when the computed index
+         lies outside the atlas volume
+         'raise' will raise a ValueError
+         'clip' will replace the index with the closest index inside the volume
+         'wrap' will wrap around to the other side of the volume. This is only here for legacy reasons
+        """
         xyz = np.array(xyz)
         dt = int if round else float
         out = np.zeros_like(xyz, dtype=dt)
-        out[..., 0] = self.x2i(xyz[..., 0], round=round)
-        out[..., 1] = self.y2i(xyz[..., 1], round=round)
-        out[..., 2] = self.z2i(xyz[..., 2], round=round)
+        out[..., 0] = self.x2i(xyz[..., 0], round=round, mode=mode)
+        out[..., 1] = self.y2i(xyz[..., 1], round=round, mode=mode)
+        out[..., 2] = self.z2i(xyz[..., 2], round=round, mode=mode)
         return out

     """Methods indices to distance"""
@@ -227,7 +260,10 @@ def _get_cache_dir():
     def compute_surface(self):
         """
         Get the volume top, bottom, left and right surfaces, and from these the outer surface of
-        the image volume. This is needed to compute probe insertions intersections
+        the image volume. This is needed to compute probe insertion intersections.
+
+        NOTE: Wherever the top or bottom surface touches the top or bottom of the atlas volume, the
+        surface is set to np.nan. If you encounter issues working with these surfaces, check whether this might be the cause.
         """
         if self.surface is None:  # only compute if it hasn't already been computed
             axz = self.xyz2dims[2]  # this is the dv axis
@@ -439,7 +475,12 @@ def slice(self, coordinate, axis, volume='image', mode='raise', region_values=No
         :param mapping: mapping to use. Options can be found using ba.regions.mappings.keys()
         :return: 2d array or 3d RGB numpy int8 array
         """
-        index = self.bc.xyz2i(np.array([coordinate] * 3))[axis]
+        if axis == 0:
+            index = self.bc.x2i(np.array(coordinate), mode=mode)
+        elif axis == 1:
+            index = self.bc.y2i(np.array(coordinate), mode=mode)
+        elif axis == 2:
+            index = self.bc.z2i(np.array(coordinate), mode=mode)

         # np.take is 50 thousand times slower than straight slicing !
         def _take(vol, ind, axis):
@@ -765,7 +806,10 @@ def from_dict(d, brain_atlas=None):
         if brain_atlas:
             iy = brain_atlas.bc.y2i(d['y'] / 1e6)
             ix = brain_atlas.bc.x2i(d['x'] / 1e6)
-            z = brain_atlas.top[iy, ix]
+            # Only use the brain surface value as z if it isn't NaN (this happens when the surface
+            # touches the edges of the atlas volume)
+            if not np.isnan(brain_atlas.top[iy, ix]):
+                z = brain_atlas.top[iy, ix]
         return Insertion(x=d['x'] / 1e6, y=d['y'] / 1e6, z=z,
                          phi=d['phi'], theta=d['theta'], depth=d['depth'] / 1e6,
                          beta=d.get('beta', 0), label=d.get('label', ''))
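To illustrate the new out-of-bounds handling, a small sketch (the coordinate values are arbitrary and deliberately outside the brain volume):

```python
import numpy as np
from ibllib.atlas import AllenAtlas

ba = AllenAtlas(25)  # 25 um resolution atlas

xyz = np.array([0.05, 0.05, 0.05])  # metres; well outside the volume

# the default now raises instead of quietly wrapping around the volume
try:
    ba.bc.xyz2i(xyz)
except ValueError as e:
    print(e)  # At least one x value lies outside of the atlas volume.

# clamp to the nearest voxel inside the volume instead
i_clip = ba.bc.xyz2i(xyz, mode='clip')

# the legacy wrapping behaviour remains available
i_wrap = ba.bc.xyz2i(xyz, mode='wrap')
```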
diff --git a/ibllib/ephys/neuropixel.py b/ibllib/ephys/neuropixel.py
index f40550cb6..17a937819 100644
--- a/ibllib/ephys/neuropixel.py
+++ b/ibllib/ephys/neuropixel.py
@@ -8,3 +8,4 @@
     ', change your imports to neuropixel !', DeprecationWarning)

 from neuropixel import *  # noqa
+from neuropixel import SITES_COORDINATES  # noqa
diff --git a/ibllib/io/extractors/biased_trials.py b/ibllib/io/extractors/biased_trials.py
index a802e80d9..a11e7849f 100644
--- a/ibllib/io/extractors/biased_trials.py
+++ b/ibllib/io/extractors/biased_trials.py
@@ -12,7 +12,6 @@
     StimOnTimes_deprecated, StimOnTriggerTimes, StimOnOffFreezeTimes, ItiInTimes,
     StimOffTriggerTimes, StimFreezeTriggerTimes, ErrorCueTriggerTimes, PhasePosQuiescence)
 from ibllib.io.extractors.training_wheel import Wheel
-from ibllib.misc import version


 class ContrastLR(BaseBpodTrialsExtractor):
@@ -163,7 +162,7 @@ def extract_all(session_path, save=False, bpod_trials=False, settings=False, ext
     base = [GoCueTriggerTimes]

     # Version check
-    if version.ge(settings['IBLRIG_VERSION_TAG'], '5.0.0'):
+    if parse_version(settings['IBLRIG_VERSION_TAG']) >= parse_version('5.0.0'):
         # We now extract a single trials table
         base.extend([
             StimOnTriggerTimes, ItiInTimes, StimOffTriggerTimes, StimFreezeTriggerTimes, ErrorCueTriggerTimes,
diff --git a/ibllib/io/extractors/bpod_trials.py b/ibllib/io/extractors/bpod_trials.py
index fdc213b68..212d9e9bf 100644
--- a/ibllib/io/extractors/bpod_trials.py
+++ b/ibllib/io/extractors/bpod_trials.py
@@ -5,10 +5,10 @@
 import logging
 from collections import OrderedDict

+from pkg_resources import parse_version
 from ibllib.io.extractors import habituation_trials, training_trials, biased_trials, opto_trials
 import ibllib.io.extractors.base
 import ibllib.io.raw_data_loaders as rawio
-from ibllib.misc import version

 _logger = logging.getLogger('ibllib')
@@ -54,7 +54,8 @@ def extract_all(session_path, save=True, bpod_trials=None, settings=None):
         files_wheel = []
         wheel = OrderedDict({k: trials.pop(k) for k in tuple(trials.keys()) if 'wheel' in k})
     elif extractor_type == 'habituation':
-        if settings['IBLRIG_VERSION_TAG'] and version.le(settings['IBLRIG_VERSION_TAG'], '5.0.0'):
+        if settings['IBLRIG_VERSION_TAG'] and \
+                parse_version(settings['IBLRIG_VERSION_TAG']) <= parse_version('5.0.0'):
             _logger.warning("No extraction of legacy habituation sessions")
             return None, None, None
         trials, files_trials = habituation_trials.extract_all(
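The recurring change across these extractor modules is the swap from the in-house `ibllib.misc.version` helpers to `pkg_resources.parse_version`. A minimal sketch of the equivalence (version strings are examples):

```python
from pkg_resources import parse_version

# old: version.ge('5.1.0', '5.0.0'); new:
assert parse_version('5.1.0') >= parse_version('5.0.0')

# parse_version compares version components numerically rather than lexically,
# so '5.10.0' correctly sorts above '5.9.0'
assert parse_version('5.10.0') > parse_version('5.9.0')
```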
diff --git a/ibllib/io/extractors/training_trials.py b/ibllib/io/extractors/training_trials.py
index 688f1d742..697134060 100644
--- a/ibllib/io/extractors/training_trials.py
+++ b/ibllib/io/extractors/training_trials.py
@@ -6,7 +6,6 @@
 import ibllib.io.raw_data_loaders as raw
 from ibllib.io.extractors.base import BaseBpodTrialsExtractor, run_extractor_classes
 from ibllib.io.extractors.training_wheel import Wheel
-from ibllib.misc import version

 _logger = logging.getLogger('ibllib')
@@ -211,7 +210,7 @@ def get_feedback_times_ge5(session_path, data=False):

     def _extract(self):
         # Version check
-        if version.ge(self.settings['IBLRIG_VERSION_TAG'], '5.0.0'):
+        if parse_version(self.settings['IBLRIG_VERSION_TAG']) >= parse_version('5.0.0'):
             merge = self.get_feedback_times_ge5(self.session_path, data=self.bpod_trials)
         else:
             merge = self.get_feedback_times_lt5(self.session_path, data=self.bpod_trials)
@@ -282,7 +281,7 @@ class GoCueTriggerTimes(BaseBpodTrialsExtractor):
     var_names = 'goCueTrigger_times'

     def _extract(self):
-        if version.ge(self.settings['IBLRIG_VERSION_TAG'], '5.0.0'):
+        if parse_version(self.settings['IBLRIG_VERSION_TAG']) >= parse_version('5.0.0'):
             goCue = np.array([tr['behavior_data']['States timestamps']
                               ['play_tone'][0][0] for tr in self.bpod_trials])
         else:
@@ -356,7 +355,7 @@ class IncludedTrials(BaseBpodTrialsExtractor):
     var_names = 'included'

     def _extract(self):
-        if version.ge(self.settings['IBLRIG_VERSION_TAG'], '5.0.0'):
+        if parse_version(self.settings['IBLRIG_VERSION_TAG']) >= parse_version('5.0.0'):
             trials_included = self.get_included_trials_ge5(
                 data=self.bpod_trials, settings=self.settings)
         else:
@@ -513,7 +512,7 @@ def _extract(self):
         # Version check
         _logger.warning("Deprecation Warning: this is an old version of stimOn extraction."
                         "From version 5., use StimOnOffFreezeTimes")
-        if version.ge(self.settings['IBLRIG_VERSION_TAG'], '5.0.0'):
+        if parse_version(self.settings['IBLRIG_VERSION_TAG']) >= parse_version('5.0.0'):
             stimOn_times = self.get_stimOn_times_ge5(self.session_path, data=self.bpod_trials)
         else:
             stimOn_times = self.get_stimOn_times_lt5(self.session_path, data=self.bpod_trials)
@@ -719,7 +718,7 @@ def extract_all(session_path, save=False, bpod_trials=None, settings=None):
     base = [RepNum, GoCueTriggerTimes]

     # Version check
-    if version.ge(settings['IBLRIG_VERSION_TAG'], '5.0.0'):
+    if parse_version(settings['IBLRIG_VERSION_TAG']) >= parse_version('5.0.0'):
         # We now extract a single trials table
         base.extend([
             StimOnTriggerTimes, ItiInTimes, StimOffTriggerTimes, StimFreezeTriggerTimes,
diff --git a/ibllib/io/raw_data_loaders.py b/ibllib/io/raw_data_loaders.py
index e5b9a3a8e..0c8107fe2 100644
--- a/ibllib/io/raw_data_loaders.py
+++ b/ibllib/io/raw_data_loaders.py
@@ -15,12 +15,12 @@
 from pathlib import Path
 from typing import Union

+from pkg_resources import parse_version
 import numpy as np
 import pandas as pd

 from iblutil.io import jsonable
 from ibllib.io.video import assert_valid_label
-from ibllib.misc import version
 from ibllib.time import uncycle_pgts, convert_pgts

 _logger = logging.getLogger('ibllib')
@@ -374,7 +374,7 @@ def load_encoder_events(session_path, settings=False):
         settings = {'IBLRIG_VERSION_TAG': '0.0.0'}
     if not path:
         return None
-    if version.ge(settings['IBLRIG_VERSION_TAG'], '5.0.0'):
+    if parse_version(settings['IBLRIG_VERSION_TAG']) >= parse_version('5.0.0'):
         return _load_encoder_events_file_ge5(path)
     else:
         return _load_encoder_events_file_lt5(path)
@@ -479,7 +479,7 @@ def load_encoder_positions(session_path, settings=False):
     if not path:
         _logger.warning("No data loaded: could not find raw encoderPositions file")
         return None
-    if version.ge(settings['IBLRIG_VERSION_TAG'], '5.0.0'):
+    if parse_version(settings['IBLRIG_VERSION_TAG']) >= parse_version('5.0.0'):
         return _load_encoder_positions_file_ge5(path)
     else:
         return _load_encoder_positions_file_lt5(path)
diff --git a/ibllib/misc/version.py b/ibllib/misc/version.py
index 26f0e6aca..085b224ac 100644
--- a/ibllib/misc/version.py
+++ b/ibllib/misc/version.py
@@ -1,4 +1,15 @@
 import pkg_resources
+import traceback
+import warnings
+
+for line in traceback.format_stack():
+    print(line.strip())
+
+warnings.warn(
+    'ibllib.version is deprecated and functionality will be removed! '
+    'use pkg_resources.parse_version and ibllib.__version__ instead. See stack above.',
+    DeprecationWarning
+)


 def _compare_version_tag(v1, v2, fcn):
diff --git a/ibllib/oneibl/patcher.py b/ibllib/oneibl/patcher.py
index a9f91c823..cac4172f6 100644
--- a/ibllib/oneibl/patcher.py
+++ b/ibllib/oneibl/patcher.py
@@ -12,8 +12,8 @@
 from one.alf.spec import is_uuid_string
 from one import params
 from one.converters import path_from_dataset
+from one.remote import globus

-from ibllib.io import globus
 from ibllib.oneibl.registration import register_dataset

 _logger = logging.getLogger('ibllib')
@@ -97,7 +97,7 @@ def _patch_dataset(self, path, dset_id=None, dry=False, ftp=False):
         full_remote_path = PurePosixPath(FLATIRON_MOUNT, remote_path)
         if isinstance(path, WindowsPath) and not ftp:
             # On Windows replace drive map with Globus uri, e.g. C:/ -> /~/C/
-            path = '/~/' + path.as_posix().replace(':', '')
+            path = globus.as_globus_path(path)
         status = self._scp(path, full_remote_path, dry=dry)[0]
         return status
@@ -140,8 +140,8 @@ def patch_dataset(self, file_list, dry=False, ftp=False, **kwargs):
         Rules for creation/patching are the same that apply for registration via Alyx
         as this uses the registration endpoint to get the dataset.
         An existing file (same session and path relative to session) will be patched.
-        :param path: full file path. Must be whithin an ALF session folder (subject/date/number)
-        can also be a list of full file pathes belonging to the same session.
+        :param path: full file path. Must be within an ALF session folder (subject/date/number)
+        can also be a list of full file paths belonging to the same session.
         :param server_repository: Alyx server repository name
         :param created_by: alyx username for the dataset (optional, defaults to root)
         :param ftp: flag for case when using ftppatcher. Don't adjust windows path in
@@ -197,13 +197,12 @@ class GlobusPatcher(Patcher):

     """

-    def __init__(self, one=None, globus_client_id=None, local_endpoint=None, label='ibllib patch'):
-        assert globus_client_id
+    def __init__(self, client_name='default', one=None, label='ibllib patch'):
         assert one
-        self.local_endpoint = local_endpoint or globus.get_local_endpoint()
+        self.local_endpoint = getattr(globus.load_client_params(f'globus.{client_name}'),
+                                      'local_endpoint', globus.get_local_endpoint_id())
+        self.transfer_client = globus.create_globus_client(client_name)
         self.label = label
-        self.transfer_client = globus.login_auto(
-            globus_client_id=globus_client_id, str_app='globus/admin')
         # transfers/delete from the current computer to the flatiron: mandatory and executed first
         self.globus_transfer = globus_sdk.TransferData(
             self.transfer_client, self.local_endpoint, FLAT_IRON_GLOBUS_ID, verify_checksum=True,
@@ -296,11 +295,11 @@ def _wait_for_task(resp):
     # on an errored task
     # Out[10]: TransferResponse({'bytes_checksummed': 0, 'bytes_transferred': 0, 'canceled_by_admin': None, 'canceled_by_admin_message': None, 'command': 'API 0.10', 'completion_time': '2021-01-03T17:39:00+00:00', 'deadline': '2021-01-04T17:37:34+00:00', 'delete_destination_extra': False, 'destination_endpoint': 'simonsfoundation#ibl', 'destination_endpoint_display_name': 'IBL Flatiron SDSC Data', 'destination_endpoint_id': 'ab2d064c-413d-11eb-b188-0ee0d5d9299f', 'directories': 0, 'effective_bytes_per_second': 0, 'encrypt_data': False, 'fatal_error': {'code': 'CANCELED', 'description': 'canceled'}, 'faults': 2, 'files': 6, 'files_skipped': 0, 'files_transferred': 0, 'history_deleted': False, 'is_ok': None, 'is_paused': False, 'key': 'complete,2021-01-03T17:38:59.697413', 'label': 'test 3B analog sync patch', 'nice_status': None, 'nice_status_details': None, 'nice_status_expires_in': None, 'nice_status_short_description': None, 'owner_id': 'e633663a-8561-4a5d-ac92-f198d43b14dc', 'preserve_timestamp': False, 'recursive_symlinks': 'ignore', 'request_time': '2021-01-03T17:37:34+00:00', 'source_endpoint': 'internationalbrainlab#916c2766-bd2a-11ea-8f22-0a21f750d19b', 'source_endpoint_display_name': 'olivier_laptop', 'source_endpoint_id': '916c2766-bd2a-11ea-8f22-0a21f750d19b', 'status': 'FAILED', 'subtasks_canceled': 6, 'subtasks_expired': 0, 'subtasks_failed': 0, 'subtasks_pending': 0, 'subtasks_retrying': 0, 'subtasks_succeeded': 6, 'subtasks_total': 12, 'symlinks': 0, 'sync_level': 3, 'task_id': '5706dd2c-4dea-11eb-8ffb-0a34088e79f9', 'type': 'TRANSFER', 'username': 'internationalbrainlab', 'verify_checksum': True})  # noqa
     while True:
-        tinfo = gtc.get_task(task_id=resp['task_id'])['completion_time']
-        if tinfo['completion_time'] is not None:
+        tinfo = gtc.get_task(task_id=resp['task_id'])
+        if tinfo and tinfo['completion_time'] is not None:
             break
         _ = gtc.task_wait(task_id=resp['task_id'], timeout=30)
-    if tinfo['fatal_error'] is not None:
+    if tinfo and tinfo['fatal_error'] is not None:
         raise ConnectionError(f"Globus transfer failed \n {tinfo}")

     # handles the transfers first
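Under the new `one.remote.globus`-based setup, constructing the patcher might look like the sketch below; the client name is hypothetical, and its parameters (Globus client id, local endpoint id) are assumed to be already configured in the ONE remote parameter files:

```python
from one.api import ONE
from ibllib.oneibl.patcher import GlobusPatcher

one = ONE()
# 'admin' is a hypothetical client name; its parameters are read from the
# 'globus.admin' section of the ONE remote parameters
gp = GlobusPatcher(client_name='admin', one=one, label='my patch job')

# then queue datasets for patching and launch the Globus transfers, e.g.
# gp.patch_datasets(file_list)  # file_list: paths within one ALF session folder
# gp.launch_transfers()
```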
diff --git a/ibllib/oneibl/registration.py b/ibllib/oneibl/registration.py
index 9052c8337..b75fad4b8 100644
--- a/ibllib/oneibl/registration.py
+++ b/ibllib/oneibl/registration.py
@@ -4,14 +4,15 @@
 import logging
 import re

+from pkg_resources import parse_version
 from dateutil import parser as dateparser
 from iblutil.io import hashfile
 from one.alf.files import get_session_path
 import one.alf.exceptions as alferr
 from one.api import ONE

+import ibllib
 import ibllib.io.extractors.base
-from ibllib.misc import version
 import ibllib.time
 import ibllib.io.raw_data_loaders as raw
 from ibllib.io import flags
@@ -67,7 +68,7 @@ def register_dataset(file_list, one=None, created_by=None, repository=None, serv
     assert len(set([get_session_path(f) for f in file_list])) == 1
     assert all([Path(f).exists() for f in file_list])
     if versions is None:
-        versions = version.ibllib()
+        versions = ibllib.__version__
     if isinstance(versions, str):
         versions = [versions for _ in file_list]
     assert isinstance(versions, list) and len(versions) == len(file_list)
@@ -339,7 +340,7 @@ def register_session(self, ses_path, file_list=True):
             'filenames': F,
             'hashes': md5s,
             'filesizes': file_sizes,
-            'versions': [version.ibllib() for _ in F]
+            'versions': [ibllib.__version__ for _ in F]
         }
         self.one.alyx.post('/register-file', data=r_)
         return session
@@ -385,13 +386,15 @@ def _register_bool(fn, file_list):
 def _read_settings_json_compatibility_enforced(json_file):
     with open(json_file) as js:
         md = json.load(js)
+    if 'IS_MOCK' not in md.keys():
+        md['IS_MOCK'] = False
     if 'IBLRIG_VERSION_TAG' not in md.keys():
         md['IBLRIG_VERSION_TAG'] = '3.2.3'
     if not md['IBLRIG_VERSION_TAG']:
         _logger.warning("You appear to be on an untagged version...")
         return md
     # 2018-12-05 Version 3.2.3 fixes (permanent fixes in IBL_RIG from 3.2.4 on)
-    if version.le(md['IBLRIG_VERSION_TAG'], '3.2.3'):
+    if parse_version(md['IBLRIG_VERSION_TAG']) <= parse_version('3.2.3'):
         if 'LAST_TRIAL_DATA' in md.keys():
             md.pop('LAST_TRIAL_DATA')
         if 'weighings' in md['PYBPOD_SUBJECT_EXTRA'].keys():
@@ -412,7 +415,7 @@ def _read_settings_json_compatibility_enforced(json_file):
 def rename_files_compatibility(ses_path, version_tag):
     if not version_tag:
         return
-    if version.le(version_tag, '3.2.3'):
+    if parse_version(version_tag) <= parse_version('3.2.3'):
         task_code = ses_path.glob('**/_ibl_trials.iti_duration.npy')
         for fn in task_code:
             fn.replace(fn.parent.joinpath('_ibl_trials.itiDuration.npy'))
diff --git a/ibllib/pipes/local_server.py b/ibllib/pipes/local_server.py
index aecc1f36d..a333c2fc6 100644
--- a/ibllib/pipes/local_server.py
+++ b/ibllib/pipes/local_server.py
@@ -147,7 +147,7 @@ def task_queue(mode='all', lab=None, one=None):
     if one is None:
         one = ONE(cache_rest=None)
     if lab is None:
-        _logger.info("Trying to infer lab from globus installation")
+        _logger.debug("Trying to infer lab from globus installation")
         lab = _get_lab(one)
     if lab is None:
         _logger.error("No lab provided or found")
diff --git a/ibllib/pipes/tasks.py b/ibllib/pipes/tasks.py
index c37bd42cf..758c30049 100644
--- a/ibllib/pipes/tasks.py
+++ b/ibllib/pipes/tasks.py
@@ -10,7 +10,7 @@

 from graphviz import Digraph

-from ibllib.misc import version
+import ibllib
 from ibllib.oneibl import data_handlers
 import one.params
 from one.api import ONE
@@ -30,7 +30,7 @@ class Task(abc.ABC):
     outputs = None  # place holder for a list of Path containing output files
     time_elapsed_secs = None
     time_out_secs = 3600 * 2  # time-out after which a task is considered dead
-    version = version.ibllib()
+    version = ibllib.__version__
     signature = {'input_files': [], 'output_files': []}  # list of tuples (filename, collection, required_flag)
     force = False  # whether or not to re-download missing input files on local server if not present
@@ -100,7 +100,7 @@ def run(self, **kwargs):
         _logger.info(f"Starting job {self.__class__}")
         if self.machine:
             _logger.info(f"Running on machine: {self.machine}")
-        _logger.info(f"running ibllib version {version.ibllib()}")
+        _logger.info(f"running ibllib version {ibllib.__version__}")
         # setup
         start_time = time.time()
         try:
@@ -193,7 +193,7 @@ def _run(self, overwrite=False):
         :param overwrite: (bool) if the output already exists,
         :return: out_files: files to be registered. Could be a list of files (pathlib.Path),
          a single file (pathlib.Path) an empty list [] or None.
-        Whithin the pipeline, there is a distinction between a job that returns an empty list
+        Within the pipeline, there is a distinction between a job that returns an empty list
         and a job that returns None. If the function returns None, the job will be labeled as
         "empty" status in the database, otherwise, the job has an expected behaviour of not
         returning any dataset.
@@ -526,7 +526,8 @@ def run_alyx_task(tdict=None, session_path=None, one=None, job_deck=None,
             _logger.warning(f"{tdict['name']} has unmet dependencies")
         # if parents are waiting or failed, set the current task status to Held
         # once the parents ran, the descendent tasks will be set from Held to Waiting (see below)
-        if any(map(lambda s: s in ['Errored', 'Held', 'Empty', 'Waiting'], parent_statuses)):
+        if any(map(lambda s: s in ['Errored', 'Held', 'Empty', 'Waiting', 'Started', 'Abandoned'],
+                   parent_statuses)):
             tdict = one.alyx.rest('tasks', 'partial_update', id=tdict['id'], data={'status': 'Held'})
     return tdict, registered_dsets
diff --git a/ibllib/plots/figures.py b/ibllib/plots/figures.py
index 47a10dd41..15d508126 100644
--- a/ibllib/plots/figures.py
+++ b/ibllib/plots/figures.py
@@ -478,16 +478,16 @@ def gain2level(gain):
     if plot_backend == 'matplotlib':
         _, axs = plt.subplots(1, 2, gridspec_kw={'width_ratios': [.95, .05]}, figsize=(16, 9))
         eqcs.append(Density(butt, fs=fs, taxis=1, ax=axs[0], title='highpass', vmin=eqc_levels[0], vmax=eqc_levels[1],
-                            cmap='Greys'))
+                            cmap='Greys_r'))

         if destripe:
             dest = voltage.destripe(raw, fs=fs, channel_labels=channel_labels)
             _, axs = plt.subplots(1, 2, gridspec_kw={'width_ratios': [.95, .05]}, figsize=(16, 9))
             eqcs.append(Density(dest, fs=fs, taxis=1, ax=axs[0], title='destripe', vmin=eqc_levels[0], vmax=eqc_levels[1],
-                                cmap='Greys'))
+                                cmap='Greys_r'))

             _, axs = plt.subplots(1, 2, gridspec_kw={'width_ratios': [.95, .05]}, figsize=(16, 9))
             eqcs.append(Density((butt - dest), fs=fs, taxis=1, ax=axs[0], title='difference', vmin=eqc_levels[0],
-                                vmax=eqc_levels[1], cmap='Greys'))
+                                vmax=eqc_levels[1], cmap='Greys_r'))

     for eqc in eqcs:
         y, x = np.meshgrid(ioutside, np.linspace(0, rl * 1e3, 500))
@@ -618,7 +618,7 @@ def raw_destripe(raw, fs, t0, i_plt, n_plt,
     Tplot = Xs.shape[1] / fs

     # PLOT RAW DATA
-    d = Density(-Xs, fs=fs, taxis=1, ax=axs[i_plt], vmin=MIN_X, vmax=MAX_X, cmap='Greys')  # noqa
+    d = Density(-Xs, fs=fs, taxis=1, ax=axs[i_plt], vmin=MIN_X, vmax=MAX_X, cmap='Greys_r')  # noqa
     axs[i_plt].set_ylabel('')
     axs[i_plt].set_xlim((0, Tplot * 1e3))
     axs[i_plt].set_ylim((0, nc))
diff --git a/ibllib/plots/snapshot.py b/ibllib/plots/snapshot.py
index 944383d44..6fc67fcda 100644
--- a/ibllib/plots/snapshot.py
+++ b/ibllib/plots/snapshot.py
@@ -7,10 +7,10 @@
 from one.api import ONE
 from ibllib.pipes import tasks
-from ibllib.misc import version
 from one.alf.exceptions import ALFObjectNotFound

 from neuropixel import trace_header, TIP_SIZE_UM
+from ibllib import __version__ as ibllib_version
 from ibllib.pipes.ephys_alignment import EphysAlignment
 from ibllib.pipes.histology import interpolate_along_track
 from ibllib.atlas import AllenAtlas
@@ -36,7 +36,7 @@ def register_images(self, widths=None, function=None, extra_dict=None):
         jsons = []
         texts = []
         for f in self.outputs:
-            json_dict = dict(tag=report_tag, version=version.ibllib(),
+            json_dict = dict(tag=report_tag, version=ibllib_version,
                              function=(function or str(self.__class__).split("'")[1]), name=f.stem)
             if extra_dict is not None:
                 assert isinstance(extra_dict, dict)
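Note the colormap name used in these hunks: matplotlib registers reversed variants of its colormaps under an `_r` suffix, so 'Greys_r' is the reversed greyscale map, while a hyphenated spelling would not resolve. A quick illustrative check:

```python
import matplotlib.pyplot as plt

plt.get_cmap('Greys_r')  # valid: the reversed greyscale colormap

try:
    plt.get_cmap('Greys-r')  # hyphenated name is not a registered colormap
except ValueError as e:
    print(e)
```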
diff --git a/ibllib/tests/test_atlas.py b/ibllib/tests/test_atlas.py
index 2eb4fc76f..433dfa652 100644
--- a/ibllib/tests/test_atlas.py
+++ b/ibllib/tests/test_atlas.py
@@ -312,11 +312,11 @@ def test_sagittal_slice(self):
         ax.clear()

     def test_horizontal_slice(self):
-        ax = self.ba.plot_hslice(dv_coordinate=0.002)
+        ax = self.ba.plot_hslice(dv_coordinate=-0.002)
         im = ax.get_images()[0]
         assert im.get_array().shape == (self.ba.bc.ny, self.ba.bc.nx)
         ax.clear()
-        ax = self.ba.plot_hslice(dv_coordinate=0.002, volume='annotation')
+        ax = self.ba.plot_hslice(dv_coordinate=-0.002, volume='annotation')
         im = ax.get_images()[0]
         assert im.get_array().shape == (self.ba.bc.ny, self.ba.bc.nx, 3)
         ax.clear()
@@ -353,9 +353,9 @@ def test_slice(self):
         # tests output shapes
         self.assertTrue(ba.slice(axis=0, coordinate=0).shape == (ny, nz))  # sagittal
         self.assertTrue(ba.slice(axis=1, coordinate=0).shape == (nx, nz))  # coronal
-        self.assertTrue(ba.slice(axis=2, coordinate=.002).shape == (ny, nx))  # horizontal
+        self.assertTrue(ba.slice(axis=2, coordinate=-.002).shape == (ny, nx))  # horizontal
         # tests out of bound
-        with self.assertRaises(IndexError):
+        with self.assertRaises(ValueError):
             ba.slice(axis=1, coordinate=123)
         self.assertTrue(ba.slice(axis=1, coordinate=21, mode='clip').shape == (nx, nz))
     """
@@ -553,9 +553,9 @@ def test_brain_coordinates(self):
         self.assertTrue(bc.ny == 7)
         self.assertTrue(bc.nz == 8)
         # test array functions
-        in_out = [([6, 7, 8], np.array([6, 7, 8])),
-                  (np.array([6, 7, 8]), np.array([6, 7, 8])),
-                  (np.array([[6, 7, 8], [6, 7, 8]]), np.array([[6, 7, 8], [6, 7, 8]])),
+        in_out = [([3, 4, 5], np.array([3, 4, 5])),
+                  (np.array([3, 4, 5]), np.array([3, 4, 5])),
+                  (np.array([[3, 4, 5], [3, 4, 5]]), np.array([[3, 4, 5], [3, 4, 5]])),
                   ]
         for io in in_out:
             self.assertTrue(np.all(bc.xyz2i(io[0]) == io[1]))
diff --git a/release_notes.md b/release_notes.md
index f264f004f..21173a563 100644
--- a/release_notes.md
+++ b/release_notes.md
@@ -1,5 +1,14 @@
+## Release Notes 2.13
+## Release Notes 2.13.0 2022-06-30
+- Deprecated ibllib.version
+- Fix Globus patcher
+- Add SpikeSortingLoader samples2times function
+- Fix atlas.BrainCoordinates.xyz2i functions to not quietly wrap indices out of volume bounds
+- Set jobs to Held if parent jobs are Started or Abandoned as well
+- Reverse matplotlib colorbars in density displays
+
 ## Release Notes 2.12
-## Release Noter 2.12.2 2022-05-27
+## Release Notes 2.12.2 2022-05-27
 - Fixes to plotting in training_status
 ## Release Notes 2.12.1 2022-05-26