From ade48ea5ed5a38259774568ea6567c9ae9d77f89 Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Mon, 18 Mar 2024 11:12:04 -0500 Subject: [PATCH 01/60] Fix failing chains pytests (#867) * Fix failing chains pytests * Update docstrings, changelog --- CHANGELOG.md | 4 ++ src/spyglass/utils/dj_chains.py | 74 ++++++++++++++++++--------------- tests/utils/test_chains.py | 19 +++++---- 3 files changed, 55 insertions(+), 42 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ea49e3d4e..6b1cf8ebc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Change Log +## [0.5.2] (Unreleased) + +- Refactor `TableChain` to include `_searched` attribute. #867 + ## [0.5.1] (March 7, 2024) ### Infrastructure diff --git a/src/spyglass/utils/dj_chains.py b/src/spyglass/utils/dj_chains.py index 4e05763fc..76ffeb107 100644 --- a/src/spyglass/utils/dj_chains.py +++ b/src/spyglass/utils/dj_chains.py @@ -123,12 +123,13 @@ class TableChain: _link_symbol : str Symbol used to represent the link between parent and child. Hardcoded to " -> ". - _has_link : bool + has_link : bool Cached attribute to store whether parent is linked to child. False if child is not in parent.descendants or nx.NetworkXNoPath is raised by nx.shortest_path. - _has_directed_link : bool - True if directed graph is used to find path. False if undirected graph. + link_type : str + 'directed' or 'undirected' based on whether path is found with directed + or undirected graph. None if no path is found. graph : nx.DiGraph Directed graph of parent's dependencies from datajoint.connection. names : List[str] @@ -175,18 +176,19 @@ def __init__(self, parent: Table, child: Table, connection=None): self._link_symbol = " -> " self.parent = parent self.child = child - self._has_link = True - self._has_directed_link = None + self.link_type = None + self._searched = False if child.full_table_name not in self.graph.nodes: logger.warning( "Can't find item in graph. Try importing: " + f"{child.full_table_name}" ) + self._searched = True def __str__(self): """Return string representation of chain: parent -> child.""" - if not self._has_link: + if not self.has_link: return "No link" return ( to_camel_case(self.parent.table_name) @@ -196,19 +198,22 @@ def __str__(self): def __repr__(self): """Return full representation of chain: parent -> {links} -> child.""" - return ( - "Chain: " - + self._link_symbol.join([t.table_name for t in self.objects]) - if self.names - else "No link" + if not self.has_link: + return "No link" + return "Chain: " + self._link_symbol.join( + [t.table_name for t in self.objects] ) def __len__(self): """Return number of tables in chain.""" + if not self.has_link: + return 0 return len(self.names) def __getitem__(self, index: Union[int, str]) -> dj.FreeTable: """Return FreeTable object at index.""" + if not self.has_link: + return None if isinstance(index, str): for i, name in enumerate(self.names): if index in name: @@ -219,10 +224,12 @@ def __getitem__(self, index: Union[int, str]) -> dj.FreeTable: def has_link(self) -> bool: """Return True if parent is linked to child. - Cached as hidden attribute _has_link to set False if nx.NetworkXNoPath - is raised by nx.shortest_path. + If not searched, search for path. If searched and no link is found, + return False. If searched and link is found, return True. """ - return self._has_link + if not self._searched: + _ = self.path + return self.link_type is not None def pk_link(self, src, trg, data) -> float: """Return 1 if data["primary"] else float("inf"). 
@@ -242,7 +249,7 @@ def find_path(self, directed=True) -> OrderedDict: If True, use directed graph. If False, use undirected graph. Defaults to True. Undirected permits paths to traverse from merge part-parent -> merge part -> merge table. Undirected excludes - PERIPHERAL_TABLES likne interval_list, nwbfile, etc. + PERIPHERAL_TABLES like interval_list, nwbfile, etc. Returns ------- @@ -265,6 +272,9 @@ def find_path(self, directed=True) -> OrderedDict: path = nx.shortest_path(self.graph, source, target) except nx.NetworkXNoPath: return None + except nx.NodeNotFound: + self._searched = True + return None ret = OrderedDict() prev_table = None @@ -283,27 +293,24 @@ def find_path(self, directed=True) -> OrderedDict: @cached_property def path(self) -> OrderedDict: """Return list of full table names in chain.""" - if not self._has_link: + if self._searched and not self.has_link: return None link = None if link := self.find_path(directed=True): - self._has_directed_link = True + self.link_type = "directed" elif link := self.find_path(directed=False): - self._has_directed_link = False + self.link_type = "undirected" + self._searched = True - if link: - return link - - self._has_link = False - return None + return link @cached_property def names(self) -> List[str]: """Return list of full table names in chain.""" - if self._has_link: - return list(self.path.keys()) - return None + if not self.has_link: + return None + return list(self.path.keys()) @cached_property def objects(self) -> List[dj.FreeTable]: @@ -311,9 +318,9 @@ def objects(self) -> List[dj.FreeTable]: Unused. Preserved for future debugging. """ - if self._has_link: - return [v["free_table"] for v in self.path.values()] - return None + if not self.has_link: + return None + return [v["free_table"] for v in self.path.values()] @cached_property def attr_maps(self) -> List[dict]: @@ -321,10 +328,9 @@ def attr_maps(self) -> List[dict]: Unused. Preserved for future debugging. """ - # - if self._has_link: - return [v["attr_map"] for v in self.path.values()] - return None + if not self.has_link: + return None + return [v["attr_map"] for v in self.path.values()] def join( self, restriction: str = None, reverse_order: bool = False @@ -339,7 +345,7 @@ def join( reverse_order : bool, optional If True, join tables in reverse order. Defaults to False. """ - if not self._has_link: + if not self.has_link: return None restriction = restriction or self.parent.restriction or True diff --git a/tests/utils/test_chains.py b/tests/utils/test_chains.py index bc88e7007..7ba4b1fa2 100644 --- a/tests/utils/test_chains.py +++ b/tests/utils/test_chains.py @@ -1,4 +1,5 @@ import pytest +from datajoint.utils import to_camel_case @pytest.fixture(scope="session") @@ -31,16 +32,13 @@ def test_invalid_chain(Nwbfile, pos_merge_tables, TableChain): def test_chain_str(chain): """Test that the str of a TableChain object is as expected.""" chain = chain - str_got = str(chain) - str_exp = ( - chain.parent.table_name + chain._link_symbol + chain.child.table_name - ) - assert str_got == str_exp, "Unexpected str of TableChain object." + parent = to_camel_case(chain.parent.table_name) + child = to_camel_case(chain.child.table_name) + str_got = str(chain) + str_exp = parent + chain._link_symbol + child -def test_chain_str_no_link(no_link_chain): - """Test that the str of a TableChain object with no link is as expected.""" - assert str(no_link_chain) == "No link", "Unexpected str of no link chain." + assert str_got == str_exp, "Unexpected str of TableChain object." 
def test_chain_repr(chain): @@ -66,3 +64,8 @@ def test_chain_getitem(chain): def test_nolink_join(no_link_chain): assert no_link_chain.join() is None, "Unexpected join of no link chain." + + +def test_chain_str_no_link(no_link_chain): + """Test that the str of a TableChain object with no link is as expected.""" + assert str(no_link_chain) == "No link", "Unexpected str of no link chain." From 6152f095397722627eae47b6bee0213faa558360 Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Wed, 20 Mar 2024 10:10:36 -0700 Subject: [PATCH 02/60] Hhmi notebook check (#876) * move restiction when fetch_nwb from merge table into argument * add default ripple parameters for trodes position data * update ripple detection to mediumnwb file * Add curation steps to spikesorting v0 notebook * fix spelling * change import alias --------- Co-authored-by: Eric Denovellis --- notebooks/03_Merge_Tables.ipynb | 8 +- notebooks/10_Spike_SortingV0.ipynb | 1902 +++++++--- notebooks/32_Ripple_Detection.ipynb | 3738 ++++++++++++++----- notebooks/py_scripts/03_Merge_Tables.py | 8 +- notebooks/py_scripts/10_Spike_SortingV0.py | 204 +- notebooks/py_scripts/32_Ripple_Detection.py | 194 +- src/spyglass/ripple/v1/ripple.py | 19 +- 7 files changed, 4474 insertions(+), 1599 deletions(-) diff --git a/notebooks/03_Merge_Tables.ipynb b/notebooks/03_Merge_Tables.ipynb index 2d76867d8..6adbbd5bf 100644 --- a/notebooks/03_Merge_Tables.ipynb +++ b/notebooks/03_Merge_Tables.ipynb @@ -415,7 +415,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -503,7 +503,7 @@ } ], "source": [ - "uuid_key = LFPOutput.fetch(limit=1, as_dict=True)[-1]\n", + "uuid_key = (LFPOutput & nwb_file_dict).fetch(limit=1, as_dict=True)[-1]\n", "restrict = LFPOutput & uuid_key\n", "restrict" ] @@ -546,7 +546,7 @@ } ], "source": [ - "result1 = restrict.fetch_nwb()\n", + "result1 = restrict.fetch_nwb(restrict.fetch1(\"KEY\"))\n", "result1" ] }, @@ -594,7 +594,7 @@ } ], "source": [ - "result2 = (LFPOutput & nwb_key).fetch_nwb()\n", + "result2 = LFPOutput().fetch_nwb(nwb_key)\n", "result2 == result1" ] }, diff --git a/notebooks/10_Spike_SortingV0.ipynb b/notebooks/10_Spike_SortingV0.ipynb index 4c9e50ff6..ca2bf1622 100644 --- a/notebooks/10_Spike_SortingV0.ipynb +++ b/notebooks/10_Spike_SortingV0.ipynb @@ -68,7 +68,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 10, "metadata": { "tags": [] }, @@ -85,6 +85,7 @@ "\n", "import spyglass.common as sgc\n", "import spyglass.spikesorting.v0 as sgs\n", + "from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput\n", "\n", "# ignore datajoint+jupyter async warnings\n", "import warnings\n", @@ -109,7 +110,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -183,8 +184,8 @@ "

admin

\n", " Ignore permission checks\n", " \n", - " Firstname Lastname\n", - "example@gmail.com\n", + " FirstName LastName\n", + "gmail@gmail.com\n", "user\n", "0 \n", " \n", @@ -195,11 +196,11 @@ "text/plain": [ "*lab_member_na google_user_na datajoint_user admin \n", "+------------+ +------------+ +------------+ +-------+\n", - "Firstname Last example@gmail. user 0 \n", + "FirstName Last gmail@gmail.co user 0 \n", " (Total: 1)" ] }, - "execution_count": 2, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -222,7 +223,10 @@ " ],\n", " skip_duplicates=True,\n", ")\n", - "sgc.LabMember.LabMemberInfo()" + "sgc.LabMember.LabMemberInfo() & {\n", + " \"team_name\": \"My Team\",\n", + " \"lab_member_name\": \"Firstname Lastname\",\n", + "}" ] }, { @@ -238,9 +242,17 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You made it in!\n" + ] + } + ], "source": [ "my_team_members = (\n", " (sgc.LabTeam.LabTeamMember & {\"team_name\": \"My Team\"})\n", @@ -292,13 +304,15 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ + "/home/sambray/Documents/spyglass/src/spyglass/data_import/insert_sessions.py:58: UserWarning: Cannot insert data from minirec20230622.nwb: minirec20230622_.nwb is already in Nwbfile table.\n", + " warnings.warn(\n" ] } ], @@ -331,30 +345,15 @@ "commonly use multiple electrodes in a `SortGroup` selected by what tetrode or\n", "shank of a probe they were on.\n", "\n", - "_Note:_ This will delete any existing entries. Answer 'yes' when prompted.\n" + "_Note:_ This will delete any existing entries. Answer 'yes' when prompted, or skip\n", + "running this cell to leave data in place.\n" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 12, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[2023-07-21 13:56:24,232][INFO]: Deleting 128 rows from `spikesorting_recording`.`sort_group__sort_group_electrode`\n", - "[2023-07-21 13:56:24,234][INFO]: Deleting 4 rows from `spikesorting_recording`.`sort_group`\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[2023-07-21 13:56:27,358][INFO]: Deletes committed.\n" - ] - } - ], + "outputs": [], "source": [ "sgs.SortGroup().set_group_by_shank(nwb_file_name)" ] @@ -373,7 +372,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -460,30 +459,30 @@ "0\n", "0\n", "3minirec20230622_.nwb\n", - "0\n", - "0\n", + "1\n", + "1\n", "4minirec20230622_.nwb\n", - "0\n", - "0\n", + "1\n", + "1\n", "5minirec20230622_.nwb\n", - "0\n", - "0\n", + "1\n", + "1\n", "6minirec20230622_.nwb\n", - "0\n", - "0\n", + "1\n", + "1\n", "7minirec20230622_.nwb\n", - "0\n", - "0\n", - "8minirec20230622_.nwb\n", - "0\n", - "0\n", - "9minirec20230622_.nwb\n", - "0\n", - "0\n", - "10minirec20230622_.nwb\n", - "0\n", - "0\n", - "11 \n", + "10\n", + "10\n", + "40minirec20230622_.nwb\n", + "10\n", + "10\n", + "41minirec20230622_.nwb\n", + "10\n", + "10\n", + "42minirec20230622_.nwb\n", + "10\n", + "10\n", + "43 \n", " \n", "

...

\n", "

Total: 128

\n", @@ -496,19 +495,19 @@ "minirec2023062 0 0 1 \n", "minirec2023062 0 0 2 \n", "minirec2023062 0 0 3 \n", - "minirec2023062 0 0 4 \n", - "minirec2023062 0 0 5 \n", - "minirec2023062 0 0 6 \n", - "minirec2023062 0 0 7 \n", - "minirec2023062 0 0 8 \n", - "minirec2023062 0 0 9 \n", - "minirec2023062 0 0 10 \n", - "minirec2023062 0 0 11 \n", + "minirec2023062 1 1 4 \n", + "minirec2023062 1 1 5 \n", + "minirec2023062 1 1 6 \n", + "minirec2023062 1 1 7 \n", + "minirec2023062 10 10 40 \n", + "minirec2023062 10 10 41 \n", + "minirec2023062 10 10 42 \n", + "minirec2023062 10 10 43 \n", " ...\n", " (Total: 128)" ] }, - "execution_count": 18, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -531,7 +530,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 14, "metadata": { "tags": [] }, @@ -602,42 +601,68 @@ " descriptive name of this interval list\n", "
\n", "

valid_times

\n", - " numpy array with start and end times for each interval\n", + " numpy array with start/end times for each interval\n", + "
\n", + "

pipeline

\n", + " type of interval list (e.g. 'position', 'spikesorting_recording_v1')\n", "
\n", " minirec20230622_.nwb\n", "01_s1\n", - "=BLOB=minirec20230622_.nwb\n", + "=BLOB=\n", + "minirec20230622_.nwb\n", + "01_s1_first9\n", + "=BLOB=\n", + "minirec20230622_.nwb\n", + "01_s1_first9 lfp band 100Hz\n", + "=BLOB=\n", + "lfp bandminirec20230622_.nwb\n", "02_s2\n", - "=BLOB=minirec20230622_.nwb\n", + "=BLOB=\n", + "minirec20230622_.nwb\n", + "lfp_test_01_s1_first9_valid times\n", + "=BLOB=\n", + "lfp_v1minirec20230622_.nwb\n", + "lfp_test_01_s1_valid times\n", + "=BLOB=\n", + "lfp_v1minirec20230622_.nwb\n", + "minirec20230622_.nwb_01_s1_first9_0_default_hippocampus\n", + "=BLOB=\n", + "spikesorting_recording_v0minirec20230622_.nwb\n", + "minirec20230622_.nwb_01_s1_first9_0_default_hippocampus_none_artifact_removed_valid_times\n", + "=BLOB=\n", + "spikesorting_artifact_v0minirec20230622_.nwb\n", "pos 0 valid times\n", - "=BLOB=minirec20230622_.nwb\n", + "=BLOB=\n", + "minirec20230622_.nwb\n", "pos 1 valid times\n", - "=BLOB=minirec20230622_.nwb\n", - "pos 2 valid times\n", - "=BLOB=minirec20230622_.nwb\n", - "pos 3 valid times\n", - "=BLOB=minirec20230622_.nwb\n", + "=BLOB=\n", + "minirec20230622_.nwb\n", "raw data valid times\n", - "=BLOB= \n", + "=BLOB=\n", + " \n", " \n", " \n", - "

Total: 7

\n", + "

Total: 11

\n", " " ], "text/plain": [ - "*nwb_file_name *interval_list valid_time\n", - "+------------+ +------------+ +--------+\n", - "minirec2023062 01_s1 =BLOB= \n", - "minirec2023062 02_s2 =BLOB= \n", - "minirec2023062 pos 0 valid ti =BLOB= \n", - "minirec2023062 pos 1 valid ti =BLOB= \n", - "minirec2023062 pos 2 valid ti =BLOB= \n", - "minirec2023062 pos 3 valid ti =BLOB= \n", - "minirec2023062 raw data valid =BLOB= \n", - " (Total: 7)" + "*nwb_file_name *interval_list valid_time pipeline \n", + "+------------+ +------------+ +--------+ +------------+\n", + "minirec2023062 01_s1 =BLOB= \n", + "minirec2023062 01_s1_first9 =BLOB= \n", + "minirec2023062 01_s1_first9 l =BLOB= lfp band \n", + "minirec2023062 02_s2 =BLOB= \n", + "minirec2023062 lfp_test_01_s1 =BLOB= lfp_v1 \n", + "minirec2023062 lfp_test_01_s1 =BLOB= lfp_v1 \n", + "minirec2023062 minirec2023062 =BLOB= spikesorting_r\n", + "minirec2023062 minirec2023062 =BLOB= spikesorting_a\n", + "minirec2023062 pos 0 valid ti =BLOB= \n", + "minirec2023062 pos 1 valid ti =BLOB= \n", + "minirec2023062 raw data valid =BLOB= \n", + " (Total: 11)" ] }, - "execution_count": 19, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -655,7 +680,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -694,7 +719,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ @@ -712,7 +737,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ @@ -735,7 +760,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 18, "metadata": {}, "outputs": [ { @@ -775,7 +800,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 19, "metadata": {}, "outputs": [ { @@ -844,20 +869,34 @@ " \n", " \n", " default\n", + "=BLOB=default_hippocampus\n", + "=BLOB=default_min_seg\n", + "=BLOB=franklab_default_hippocampus\n", + "=BLOB=franklab_default_hippocampus_min_segment_length\n", + "=BLOB=franklab_tetrode_hippocampus\n", + "=BLOB=franklab_tetrode_hippocampus_min_seg\n", + "=BLOB=lf_test\n", "=BLOB= \n", " \n", " \n", - "

Total: 1

\n", + "

Total: 8

\n", " " ], "text/plain": [ "*preproc_param preproc_pa\n", "+------------+ +--------+\n", "default =BLOB= \n", - " (Total: 1)" + "default_hippoc =BLOB= \n", + "default_min_se =BLOB= \n", + "franklab_defau =BLOB= \n", + "franklab_defau =BLOB= \n", + "franklab_tetro =BLOB= \n", + "franklab_tetro =BLOB= \n", + "lf_test =BLOB= \n", + " (Total: 8)" ] }, - "execution_count": 24, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -875,7 +914,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -905,7 +944,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 21, "metadata": {}, "outputs": [], "source": [ @@ -932,7 +971,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 22, "metadata": {}, "outputs": [ { @@ -941,7 +980,7 @@ "'01_s1'" ] }, - "execution_count": 33, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -952,7 +991,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 23, "metadata": {}, "outputs": [], "source": [ @@ -978,7 +1017,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 24, "metadata": {}, "outputs": [ { @@ -1076,7 +1115,7 @@ " (Total: 1)" ] }, - "execution_count": 35, + "execution_count": 24, "metadata": {}, "output_type": "execute_result" } @@ -1104,31 +1143,9 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "write_binary_recording with n_jobs = 8 and chunk_size = 299593\n" - ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "0b6d4bd70bba4a92bdf77c88d07d4b08", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "write_binary_recording: 0%| | 0/1 [00:0001_s1_first9\n", "default_hippocampus\n", "My Team\n", - "/home/cb/wrk/zOther/data/recording/minirec20230622_.nwb_01_s1_first9_0_default_hippocampus\n", + "/stelmo/nwb/recording/minirec20230622_.nwb_01_s1_first9_0_default_hippocampus\n", "minirec20230622_.nwb_01_s1_first9_0_default_hippocampus \n", " \n", " \n", @@ -1241,11 +1258,11 @@ "text/plain": [ "*nwb_file_name *sort_group_id *sort_interval *preproc_param *team_name recording_path sort_interval_\n", "+------------+ +------------+ +------------+ +------------+ +-----------+ +------------+ +------------+\n", - "minirec2023062 0 01_s1_first9 default_hippoc My Team /home/cb/wrk/z minirec2023062\n", + "minirec2023062 0 01_s1_first9 default_hippoc My Team /stelmo/nwb/re minirec2023062\n", " (Total: 1)" ] }, - "execution_count": 39, + "execution_count": 26, "metadata": {}, "output_type": "execute_result" } @@ -1270,7 +1287,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 27, "metadata": {}, "outputs": [], "source": [ @@ -1290,7 +1307,7 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 29, "metadata": {}, "outputs": [ { @@ -1392,13 +1409,13 @@ " (Total: 1)" ] }, - "execution_count": 42, + "execution_count": 29, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "sgs.ArtifactDetectionSelection().insert1(artifact_key)\n", + "sgs.ArtifactDetectionSelection().insert1(artifact_key, skip_duplicates=True)\n", "sgs.ArtifactDetectionSelection() & artifact_key" ] }, @@ -1412,17 +1429,9 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": 30, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": 
"stream", - "text": [ - "Amplitude and zscore thresholds are both None, skipping artifact detection\n" - ] - } - ], + "outputs": [], "source": [ "sgs.ArtifactDetection.populate(artifact_key)" ] @@ -1436,7 +1445,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 31, "metadata": {}, "outputs": [ { @@ -1546,7 +1555,7 @@ " (Total: 1)" ] }, - "execution_count": 44, + "execution_count": 31, "metadata": {}, "output_type": "execute_result" } @@ -1573,14 +1582,14 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": 32, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "{'sorter': 'mountainsort4', 'sorter_params_name': 'default', 'sorter_params': {'detect_sign': -1, 'adjacency_radius': -1, 'freq_min': 300, 'freq_max': 6000, 'filter': True, 'whiten': True, 'num_workers': 1, 'clip_size': 50, 'detect_threshold': 3, 'detect_interval': 10, 'tempdir': None}}\n" + "{'sorter': 'mountainsort4', 'sorter_params_name': 'default', 'sorter_params': {'detect_sign': -1, 'adjacency_radius': -1, 'freq_min': 300, 'freq_max': 6000, 'filter': True, 'whiten': True, 'num_workers': 1, 'clip_size': 50, 'detect_threshold': 3, 'detect_interval': 10}}\n" ] } ], @@ -1605,7 +1614,7 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": 33, "metadata": { "tags": [] }, @@ -1623,7 +1632,6 @@ " 'freq_max': 0,\n", " 'freq_min': 0,\n", " 'num_workers': 4,\n", - " 'tempdir': None,\n", " 'verbose': True,\n", " 'whiten': False}\n" ] @@ -1659,7 +1667,7 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 34, "metadata": { "tags": [] }, @@ -1679,11 +1687,10 @@ " 'clip_size': 39,\n", " 'detect_threshold': 3,\n", " 'detect_interval': 10,\n", - " 'tempdir': None,\n", " 'verbose': True}}" ] }, - "execution_count": 53, + "execution_count": 34, "metadata": {}, "output_type": "execute_result" } @@ -1720,7 +1727,7 @@ }, { "cell_type": "code", - "execution_count": 59, + "execution_count": 35, "metadata": {}, "outputs": [ { @@ -1736,7 +1743,7 @@ " 'sorter_params_name': 'hippocampus_tutorial'}" ] }, - "execution_count": 59, + "execution_count": 35, "metadata": {}, "output_type": "execute_result" } @@ -1754,7 +1761,7 @@ }, { "cell_type": "code", - "execution_count": 60, + "execution_count": 36, "metadata": {}, "outputs": [ { @@ -1864,7 +1871,7 @@ " (Total: 1)" ] }, - "execution_count": 60, + "execution_count": 36, "metadata": {}, "output_type": "execute_result" } @@ -1887,478 +1894,110 @@ }, { "cell_type": "code", - "execution_count": 62, + "execution_count": 39, "metadata": { "tags": [] }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[11:33:09][INFO] Spyglass: Running spike sorting on {'nwb_file_name': 'minirec20230622_.nwb', 'sort_group_id': 0, 'sort_interval_name': '01_s1_first9', 'preproc_params_name': 'default_hippocampus', 'team_name': 'My Team', 'sorter': 'mountainsort4', 'sorter_params_name': 'hippocampus_tutorial', 'artifact_removed_interval_list_name': 'minirec20230622_.nwb_01_s1_first9_0_default_hippocampus_none_artifact_removed_valid_times'}...\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "Running spike sorting on {'nwb_file_name': 'minirec20230622_.nwb', 'sort_group_id': 0, 'sort_interval_name': '01_s1_first9', 'preproc_params_name': 'default_hippocampus', 'team_name': 'My Team', 'sorter': 'mountainsort4', 'sorter_params_name': 'hippocampus_tutorial', 'artifact_removed_interval_list_name': 
'minirec20230622_.nwb_01_s1_first9_0_default_hippocampus_none_artifact_removed_valid_times'}...\n", "Mountainsort4 use the OLD spikeextractors mapped with NewToOldRecording\n", - "Using temporary directory /home/cb/wrk/zOther/data/tmp/tmpr9_xzjwk\n", + "Using temporary directory /stelmo/nwb/tmp/tmpo38gkza9\n", "Using 4 workers.\n", - "Using tempdir: /home/cb/wrk/zOther/data/tmp/tmpr9_xzjwk/tmpo_xved1i\n", + "Using tempdir: /stelmo/nwb/tmp/tmpo38gkza9/tmp05afo_06\n", "Num. workers = 4\n", - "Preparing /home/cb/wrk/zOther/data/tmp/tmpr9_xzjwk/tmpo_xved1i/timeseries.hdf5...\n", - "Preparing neighborhood sorters (M=31, N=269997)...\n", - "Neighboorhood of channel 29 has 5 channels.Neighboorhood of channel 28 has 6 channels.\n", - "\n", - "Detecting events on channel 29 (phase1)...\n", - "Detecting events on channel 30 (phase1)...\n", - "Neighboorhood of channel 23 has 7 channels.\n", - "Detecting events on channel 24 (phase1)...\n", - "Neighboorhood of channel 7 has 7 channels.\n", - "Detecting events on channel 8 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.022525\n", - "Num events detected on channel 30 (phase1): 1\n", - "Computing PCA features for channel 30 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.025604\n", - "Num events detected on channel 29 (phase1): 6\n", - "Computing PCA features for channel 29 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.028793\n", - "Num events detected on channel 24 (phase1): 5\n", - "Computing PCA features for channel 24 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.029814\n", - "Num events detected on channel 8 (phase1): 5\n", - "Computing PCA features for channel 8 (phase1)...\n", - "Clustering for channel 30 (phase1)...\n", - "Found 1 clusters for channel 30 (phase1)...\n", - "Computing templates for channel 30 (phase1)...\n", - "Re-assigning events for channel 30 (phase1)...\n", - "Neighboorhood of channel 17 has 7 channels.\n", - "Detecting events on channel 18 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.096352\n", - "Num events detected on channel 18 (phase1): 5\n", - "Computing PCA features for channel 18 (phase1)...\n", - "Clustering for channel 24 (phase1)...\n", - "Found 1 clusters for channel 24 (phase1)...Clustering for channel 29 (phase1)...\n", - "\n", - "Computing templates for channel 24 (phase1)...\n", - "Found 1 clusters for channel 29 (phase1)...\n", - "Computing templates for channel 29 (phase1)...\n", - "Clustering for channel 8 (phase1)...\n", - "Found 1 clusters for channel 8 (phase1)...\n", - "Computing templates for channel 8 (phase1)...\n", - "Re-assigning events for channel 29 (phase1)...Re-assigning events for channel 24 (phase1)...\n", - "Neighboorhood of channel 20 has 7 channels.\n", - "Detecting events on channel 21 (phase1)...\n", - "\n", - "Neighboorhood of channel 25 has 7 channels.\n", - "Detecting events on channel 26 (phase1)...\n", - "Clustering for channel 18 (phase1)...\n", - "Found 1 clusters for channel 18 (phase1)...\n", - "Computing templates for channel 18 (phase1)...\n", - "Re-assigning events for channel 8 (phase1)...\n", - "Neighboorhood of channel 26 has 7 channels.\n", - "Detecting events on channel 27 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.039213\n", - "Num events detected on channel 26 (phase1): 4\n", - "Computing PCA features for channel 26 (phase1)...\n", - "Elapsed time for detect on neighborhood:Re-assigning events for channel 18 (phase1)...\n", - " 
0:00:00.055824\n", - "Num events detected on channel 21 (phase1): 14\n", - "Neighboorhood of channel 14 has 7 channels.Computing PCA features for channel 21 (phase1)...\n", - "\n", - "Detecting events on channel 15 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.029223\n", - "Num events detected on channel 27 (phase1): 8\n", - "Computing PCA features for channel 27 (phase1)...\n", - "Elapsed time for detect on neighborhood:Clustering for channel 27 (phase1)...\n", - "Found 1 clusters for channel 27 (phase1)... 0:00:00.086340\n", - "Num events detected on channel 15 (phase1): 2\n", - "Computing PCA features for channel 15 (phase1)...\n", - "\n", - "Computing templates for channel 27 (phase1)...\n", - "Clustering for channel 26 (phase1)...\n", - "Found 1 clusters for channel 26 (phase1)...\n", - "Computing templates for channel 26 (phase1)...\n", - "Clustering for channel 21 (phase1)...\n", - "Found 1 clusters for channel 21 (phase1)...\n", - "Computing templates for channel 21 (phase1)...\n", - "Re-assigning events for channel 26 (phase1)...Re-assigning events for channel 27 (phase1)...\n", - "Neighboorhood of channel 4 has 7 channels.\n", - "Detecting events on channel 5 (phase1)...\n", - "Re-assigning events for channel 21 (phase1)...\n", - "\n", - "Neighboorhood of channel 15 has 7 channels.Neighboorhood of channel 16 has 7 channels.\n", - "Detecting events on channel 16 (phase1)...\n", - "\n", - "Detecting events on channel 17 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.027093\n", - "Num events detected on channel 5 (phase1): 18\n", - "Computing PCA features for channel 5 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.035564\n", - "Num events detected on channel 16 (phase1): 6\n", - "Computing PCA features for channel 16 (phase1)...\n", - "Clustering for channel 5 (phase1)...\n", - "Found 1 clusters for channel 5 (phase1)...\n", - "Computing templates for channel 5 (phase1)...\n", - "Clustering for channel 16 (phase1)...\n", - "Found 1 clusters for channel 16 (phase1)...\n", - "Computing templates for channel 16 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.091005\n", - "Num events detected on channel 17 (phase1): 17\n", - "Computing PCA features for channel 17 (phase1)...\n", - "Clustering for channel 15 (phase1)...Re-assigning events for channel 5 (phase1)...\n", - "Found 1 clusters for channel 15 (phase1)...\n", - "Computing templates for channel 15 (phase1)...\n", - "\n", - "Neighboorhood of channel 11 has 7 channels.\n", - "Detecting events on channel 12 (phase1)...\n", - "Re-assigning events for channel 16 (phase1)...\n", - "Neighboorhood of channel 12 has 7 channels.\n", - "Detecting events on channel 13 (phase1)...\n", - "Elapsed time for detect on neighborhood:Re-assigning events for channel 15 (phase1)... 
0:00:00.040278\n", - "Num events detected on channel 12 (phase1): 7\n", - "Computing PCA features for channel 12 (phase1)...\n", - "\n", - "Neighboorhood of channel 6 has 7 channels.\n", - "Detecting events on channel 7 (phase1)...\n", - "Clustering for channel 17 (phase1)...\n", - "Found 1 clusters for channel 17 (phase1)...\n", - "Computing templates for channel 17 (phase1)...\n", - "Re-assigning events for channel 17 (phase1)...\n", - "Neighboorhood of channel 21 has 7 channels.\n", - "Detecting events on channel 22 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.033808\n", - "Num events detected on channel 7 (phase1): 10\n", - "Computing PCA features for channel 7 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.076235\n", - "Num events detected on channel 13 (phase1): 1\n", - "Computing PCA features for channel 13 (phase1)...\n", - "Clustering for channel 12 (phase1)...\n", - "Found 1 clusters for channel 12 (phase1)...\n", - "Computing templates for channel 12 (phase1)...\n", - "Clustering for channel 13 (phase1)...Re-assigning events for channel 12 (phase1)...\n", - "Neighboorhood of channel 0 has 4 channels.\n", - "Detecting events on channel 1 (phase1)...\n", - "\n", - "Found 1 clusters for channel 13 (phase1)...\n", - "Computing templates for channel 13 (phase1)...\n", - "Elapsed time for detect on neighborhood:Re-assigning events for channel 13 (phase1)...\n", - "Neighboorhood of channel 5 has 7 channels.\n", - "Detecting events on channel 6 (phase1)...\n", - " 0:00:00.026181\n", - "Num events detected on channel 1 (phase1): 3\n", - "Computing PCA features for channel 1 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.133353\n", - "Num events detected on channel 22 (phase1): 3\n", - "Computing PCA features for channel 22 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.025383\n", - "Num events detected on channel 6 (phase1): 2\n", - "Computing PCA features for channel 6 (phase1)...\n", - "Clustering for channel 1 (phase1)...Clustering for channel 7 (phase1)...\n", - "Found 1 clusters for channel 1 (phase1)...\n", - "Computing templates for channel 1 (phase1)...\n", - "\n", - "Found 1 clusters for channel 7 (phase1)...\n", - "Computing templates for channel 7 (phase1)...\n", - "Re-assigning events for channel 1 (phase1)...\n", - "Neighboorhood of channel 13 has 7 channels.Re-assigning events for channel 7 (phase1)...\n", - "Detecting events on channel 14 (phase1)...\n", - "\n", - "Neighboorhood of channel 8 has 7 channels.\n", - "Detecting events on channel 9 (phase1)...\n", - "Clustering for channel 6 (phase1)...\n", - "Found 1 clusters for channel 6 (phase1)...\n", - "Computing templates for channel 6 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.033709\n", - "Num events detected on channel 9 (phase1): 6\n", - "Computing PCA features for channel 9 (phase1)...\n", - "Elapsed time for detect on neighborhood:Re-assigning events for channel 6 (phase1)... 
0:00:00.055517\n", - "Num events detected on channel 14 (phase1): 4\n", - "Computing PCA features for channel 14 (phase1)...\n", - "\n", - "Neighboorhood of channel 22 has 7 channels.\n", - "Detecting events on channel 23 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.020576\n", - "Num events detected on channel 23 (phase1): 17\n", - "Computing PCA features for channel 23 (phase1)...\n", - "Clustering for channel 23 (phase1)...Clustering for channel 14 (phase1)...\n", - "Found 1 clusters for channel 14 (phase1)...\n", - "Computing templates for channel 14 (phase1)...\n", - "Clustering for channel 22 (phase1)...\n", - "Found 1 clusters for channel 22 (phase1)...\n", - "Computing templates for channel 22 (phase1)...\n", - "Clustering for channel 9 (phase1)...\n", - "Found 1 clusters for channel 9 (phase1)...\n", - "Computing templates for channel 9 (phase1)...\n", - "\n", - "Found 1 clusters for channel 23 (phase1)...\n", - "Computing templates for channel 23 (phase1)...\n", - "Re-assigning events for channel 14 (phase1)...\n", - "Re-assigning events for channel 9 (phase1)...Neighboorhood of channel 18 has 7 channels.\n", - "Detecting events on channel 19 (phase1)...\n", - "\n", - "Neighboorhood of channel 9 has 7 channels.\n", - "Detecting events on channel 10 (phase1)...\n", - "Re-assigning events for channel 23 (phase1)...\n", - "Neighboorhood of channel 27 has 7 channels.\n", - "Detecting events on channel 28 (phase1)...\n", - "Re-assigning events for channel 22 (phase1)...\n", - "Neighboorhood of channel 3 has 7 channels.\n", - "Detecting events on channel 4 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.035256\n", - "Num events detected on channel 19 (phase1): 11\n", - "Computing PCA features for channel 19 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.040792\n", - "Num events detected on channel 10 (phase1): 5\n", - "Computing PCA features for channel 10 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.033487\n", - "Num events detected on channel 28 (phase1): 4\n", - "Computing PCA features for channel 28 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.029246\n", - "Num events detected on channel 4 (phase1): 10\n", - "Computing PCA features for channel 4 (phase1)...\n", - "Clustering for channel 28 (phase1)...\n", - "Found 1 clusters for channel 28 (phase1)...\n", - "Computing templates for channel 28 (phase1)...\n", - "Re-assigning events for channel 28 (phase1)...\n", - "Clustering for channel 10 (phase1)...\n", - "Found 1 clusters for channel 10 (phase1)...\n", - "Computing templates for channel 10 (phase1)...\n", - "Re-assigning events for channel 10 (phase1)...\n", - "Clustering for channel 19 (phase1)...\n", - "Found 1 clusters for channel 19 (phase1)...\n", - "Computing templates for channel 19 (phase1)...\n", - "Re-assigning events for channel 19 (phase1)...\n", - "Neighboorhood of channel 1 has 5 channels.\n", + "Preparing /stelmo/nwb/tmp/tmpo38gkza9/tmp05afo_06/timeseries.hdf5...\n", + "Preparing neighborhood sorters (M=3, N=269997)...\n", + "Neighboorhood of channel 1 has 3 channels.\n", "Detecting events on channel 2 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.019415\n", - "Num events detected on channel 2 (phase1): 3\n", + "Elapsed time for detect on neighborhood: 0:00:00.035859\n", + "Num events detected on channel 2 (phase1): 334\n", "Computing PCA features for channel 2 (phase1)...\n", - "Clustering for channel 4 (phase1)...\n", - "Found 1 
clusters for channel 4 (phase1)...\n", - "Computing templates for channel 4 (phase1)...\n", "Clustering for channel 2 (phase1)...\n", "Found 1 clusters for channel 2 (phase1)...\n", "Computing templates for channel 2 (phase1)...\n", "Re-assigning events for channel 2 (phase1)...\n", - "Neighboorhood of channel 30 has 4 channels.\n", - "Detecting events on channel 31 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.012684\n", - "Num events detected on channel 31 (phase1): 0\n", - "Computing PCA features for channel 31 (phase1)...\n", - "Clustering for channel 31 (phase1)...\n", - "Found 0 clusters for channel 31 (phase1)...\n", - "Computing templates for channel 31 (phase1)...\n", - "Re-assigning events for channel 31 (phase1)...\n", - "Neighboorhood of channel 24 has 7 channels.\n", - "Detecting events on channel 25 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.017826\n", - "Num events detected on channel 25 (phase1): 14\n", - "Computing PCA features for channel 25 (phase1)...\n", - "Re-assigning events for channel 4 (phase1)...\n", - "Neighboorhood of channel 19 has 7 channels.\n", - "Detecting events on channel 20 (phase1)...\n", - "Clustering for channel 25 (phase1)...\n", - "Found 1 clusters for channel 25 (phase1)...\n", - "Computing templates for channel 25 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.022288\n", - "Num events detected on channel 20 (phase1): 14\n", - "Computing PCA features for channel 20 (phase1)...\n", - "Re-assigning events for channel 25 (phase1)...\n", - "Neighboorhood of channel 2 has 6 channels.\n", + "Neighboorhood of channel 2 has 3 channels.\n", "Detecting events on channel 3 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.018741\n", - "Num events detected on channel 3 (phase1): 11\n", + "Elapsed time for detect on neighborhood: 0:00:00.036157\n", + "Num events detected on channel 3 (phase1): 463\n", "Computing PCA features for channel 3 (phase1)...\n", "Clustering for channel 3 (phase1)...\n", "Found 1 clusters for channel 3 (phase1)...\n", "Computing templates for channel 3 (phase1)...\n", "Re-assigning events for channel 3 (phase1)...\n", - "Neighboorhood of channel 10 has 7 channels.\n", - "Detecting events on channel 11 (phase1)...\n", - "Clustering for channel 20 (phase1)...\n", - "Found 1 clusters for channel 20 (phase1)...\n", - "Computing templates for channel 20 (phase1)...\n", - "Re-assigning events for channel 20 (phase1)...\n", - "Elapsed time for detect on neighborhood: 0:00:00.035092\n", - "Num events detected on channel 11 (phase1): 6\n", - "Computing PCA features for channel 11 (phase1)...\n", - "Clustering for channel 11 (phase1)...\n", - "Found 1 clusters for channel 11 (phase1)...\n", - "Computing templates for channel 11 (phase1)...\n", - "Re-assigning events for channel 11 (phase1)...\n", - "Neighboorhood of channel 17 has 7 channels.\n", - "Computing PCA features for channel 18 (phase2)...\n", - "No duplicate events found for channel 17 in phase2\n", - "Neighboorhood of channel 25 has 7 channels.\n", - "Computing PCA features for channel 26 (phase2)...\n", - "No duplicate events found for channel 25 in phase2\n", - "Neighboorhood of channel 4 has 7 channels.\n", - "Computing PCA features for channel 5 (phase2)...\n", - "No duplicate events found for channel 4 in phase2\n", - "Neighboorhood of channel 29 has 5 channels.\n", - "Computing PCA features for channel 30 (phase2)...\n", - "No duplicate events found for channel 29 in phase2\n", - "Clustering 
for channel 30 (phase2)...\n", - "Found 1 clusters for channel 30 (phase2)...\n", - "Neighboorhood of channel 8 has 7 channels.\n", - "Computing PCA features for channel 9 (phase2)...\n", - "No duplicate events found for channel 8 in phase2\n", - "Clustering for channel 18 (phase2)...\n", - "Found 1 clusters for channel 18 (phase2)...\n", - "Neighboorhood of channel 24 has 7 channels.\n", - "Computing PCA features for channel 25 (phase2)...\n", - "No duplicate events found for channel 24 in phase2\n", - "Clustering for channel 5 (phase2)...\n", - "Found 1 clusters for channel 5 (phase2)...\n", - "Neighboorhood of channel 1 has 5 channels.\n", + "Neighboorhood of channel 0 has 3 channels.\n", + "Detecting events on channel 1 (phase1)...\n", + "Elapsed time for detect on neighborhood: 0:00:00.033028\n", + "Num events detected on channel 1 (phase1): 341\n", + "Computing PCA features for channel 1 (phase1)...\n", + "Clustering for channel 1 (phase1)...\n", + "Found 2 clusters for channel 1 (phase1)...\n", + "Computing templates for channel 1 (phase1)...\n", + "Re-assigning events for channel 1 (phase1)...\n", + "Neighboorhood of channel 1 has 3 channels.\n", "Computing PCA features for channel 2 (phase2)...\n", "No duplicate events found for channel 1 in phase2\n", - "Clustering for channel 9 (phase2)...\n", - "Found 1 clusters for channel 9 (phase2)...\n", - "Neighboorhood of channel 14 has 7 channels.\n", - "Computing PCA features for channel 15 (phase2)...\n", - "No duplicate events found for channel 14 in phase2\n", "Clustering for channel 2 (phase2)...\n", "Found 1 clusters for channel 2 (phase2)...\n", - "Neighboorhood of channel 12 has 7 channels.\n", - "Computing PCA features for channel 13 (phase2)...\n", - "No duplicate events found for channel 12 in phase2\n", - "Clustering for channel 26 (phase2)...\n", - "Found 1 clusters for channel 26 (phase2)...\n", - "Neighboorhood of channel 27 has 7 channels.\n", - "Computing PCA features for channel 28 (phase2)...\n", - "No duplicate events found for channel 27 in phase2\n", - "Clustering for channel 15 (phase2)...\n", - "Found 1 clusters for channel 15 (phase2)...\n", - "Neighboorhood of channel 28 has 6 channels.\n", - "Computing PCA features for channel 29 (phase2)...\n", - "No duplicate events found for channel 28 in phase2\n", - "Clustering for channel 13 (phase2)...\n", - "Found 1 clusters for channel 13 (phase2)...\n", - "Clustering for channel 29 (phase2)...\n", - "Found 1 clusters for channel 29 (phase2)...\n", - "Neighboorhood of channel 16 has 7 channels.\n", - "Computing PCA features for channel 17 (phase2)...\n", - "No duplicate events found for channel 16 in phase2\n", - "Neighboorhood of channel 13 has 7 channels.\n", - "Computing PCA features for channel 14 (phase2)...\n", - "No duplicate events found for channel 13 in phase2\n", - "Clustering for channel 17 (phase2)...\n", - "Found 1 clusters for channel 17 (phase2)...\n", - "Neighboorhood of channel 26 has 7 channels.\n", - "Computing PCA features for channel 27 (phase2)...\n", - "No duplicate events found for channel 26 in phase2\n", - "Clustering for channel 27 (phase2)...Clustering for channel 14 (phase2)...\n", - "\n", - "Found 1 clusters for channel 14 (phase2)...\n", - "Found 1 clusters for channel 27 (phase2)...\n", - "Neighboorhood of channel 18 has 7 channels.\n", - "Computing PCA features for channel 19 (phase2)...\n", - "No duplicate events found for channel 18 in phase2\n", - "Neighboorhood of channel 11 has 7 channels.\n", - "Computing PCA features for channel 
12 (phase2)...Clustering for channel 28 (phase2)...\n", - "\n", - "No duplicate events found for channel 11 in phase2\n", - "Found 1 clusters for channel 28 (phase2)...\n", - "Neighboorhood of channel 15 has 7 channels.\n", - "Computing PCA features for channel 16 (phase2)...\n", - "No duplicate events found for channel 15 in phase2\n", - "Clustering for channel 25 (phase2)...\n", - "Found 1 clusters for channel 25 (phase2)...\n", - "Neighboorhood of channel 9 has 7 channels.\n", - "Computing PCA features for channel 10 (phase2)...\n", - "No duplicate events found for channel 9 in phase2\n", - "Clustering for channel 12 (phase2)...\n", - "Found 1 clusters for channel 12 (phase2)...\n", - "Neighboorhood of channel 10 has 7 channels.\n", - "Computing PCA features for channel 11 (phase2)...\n", - "No duplicate events found for channel 10 in phase2\n", - "Clustering for channel 19 (phase2)...\n", - "Found 1 clusters for channel 19 (phase2)...\n", - "Neighboorhood of channel 3 has 7 channels.\n", - "Computing PCA features for channel 4 (phase2)...\n", - "No duplicate events found for channel 3 in phase2\n", - "Clustering for channel 11 (phase2)...\n", - "Found 1 clusters for channel 11 (phase2)...\n", - "Neighboorhood of channel 6 has 7 channels.\n", - "Computing PCA features for channel 7 (phase2)...\n", - "No duplicate events found for channel 6 in phase2\n", - "Clustering for channel 4 (phase2)...\n", - "Found 1 clusters for channel 4 (phase2)...\n", - "Neighboorhood of channel 23 has 7 channels.\n", - "Computing PCA features for channel 24 (phase2)...\n", - "No duplicate events found for channel 23 in phase2\n", - "Clustering for channel 7 (phase2)...\n", - "Found 1 clusters for channel 7 (phase2)...\n", - "Neighboorhood of channel 22 has 7 channels.\n", - "Computing PCA features for channel 23 (phase2)...\n", - "No duplicate events found for channel 22 in phase2\n", - "Clustering for channel 23 (phase2)...Clustering for channel 16 (phase2)...\n", - "\n", - "Found 1 clusters for channel 16 (phase2)...\n", - "Found 1 clusters for channel 23 (phase2)...\n", - "Neighboorhood of channel 5 has 7 channels.\n", - "Computing PCA features for channel 6 (phase2)...\n", - "No duplicate events found for channel 5 in phase2\n", - "Neighboorhood of channel 20 has 7 channels.\n", - "Computing PCA features for channel 21 (phase2)...\n", - "No duplicate events found for channel 20 in phase2\n", - "Clustering for channel 10 (phase2)...\n", - "Found 1 clusters for channel 10 (phase2)...\n", - "Neighboorhood of channel 2 has 6 channels.\n", + "Neighboorhood of channel 2 has 3 channels.\n", "Computing PCA features for channel 3 (phase2)...\n", "No duplicate events found for channel 2 in phase2\n", - "Clustering for channel 24 (phase2)...\n", - "Found 1 clusters for channel 24 (phase2)...\n", - "Clustering for channel 21 (phase2)...\n", - "Found 1 clusters for channel 21 (phase2)...\n", - "Neighboorhood of channel 21 has 7 channels.\n", - "Computing PCA features for channel 22 (phase2)...\n", - "No duplicate events found for channel 21 in phase2\n", - "Clustering for channel 6 (phase2)...\n", - "Found 1 clusters for channel 6 (phase2)...\n", - "Neighboorhood of channel 7 has 7 channels.\n", - "Clustering for channel 22 (phase2)...\n", - "Found 1 clusters for channel 22 (phase2)...\n", - "Computing PCA features for channel 8 (phase2)...\n", - "No duplicate events found for channel 7 in phase2\n", "Clustering for channel 3 (phase2)...\n", "Found 1 clusters for channel 3 (phase2)...\n", - "Neighboorhood of channel 
0 has 4 channels.\n", + "Neighboorhood of channel 0 has 3 channels.\n", "Computing PCA features for channel 1 (phase2)...\n", "No duplicate events found for channel 0 in phase2\n", "Clustering for channel 1 (phase2)...\n", "Found 1 clusters for channel 1 (phase2)...\n", - "Neighboorhood of channel 30 has 4 channels.\n", - "Computing PCA features for channel 31 (phase2)...\n", - "No duplicate events found for channel 30 in phase2\n", - "Clustering for channel 31 (phase2)...\n", - "Found 0 clusters for channel 31 (phase2)...\n", - "Clustering for channel 8 (phase2)...\n", - "Found 1 clusters for channel 8 (phase2)...\n", - "Neighboorhood of channel 19 has 7 channels.\n", - "Computing PCA features for channel 20 (phase2)...\n", - "No duplicate events found for channel 19 in phase2\n", - "Clustering for channel 20 (phase2)...\n", - "Found 1 clusters for channel 20 (phase2)...\n", "Preparing output...\n", - "Done with ms4alg.\n", - "Cleaning tempdir::::: /home/cb/wrk/zOther/data/tmp/tmpr9_xzjwk/tmpo_xved1i\n", - "mountainsort4 run time 5.69s\n", - "Saving sorting results...\n" + "Done with ms4alg.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/spikeinterface/sorters/basesorter.py:234: ResourceWarning: unclosed file <_io.TextIOWrapper name='/stelmo/nwb/recording/minirec20230622_.nwb_01_s1_first9_0_default_hippocampus/traces_cached_seg0.raw' mode='r' encoding='UTF-8'>\n", + " SorterClass._run_from_folder(sorter_output_folder, sorter_params, verbose)\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n", + "[11:33:24][INFO] Spyglass: Saving sorting results...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Cleaning tempdir::::: /stelmo/nwb/tmp/tmpo38gkza9/tmp05afo_06\n", + "mountainsort4 run time 12.62s\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "/home/cb/miniconda3/envs/spy/lib/python3.9/site-packages/spikeinterface/core/basesorting.py:212: UserWarning: The registered recording will not be persistent on disk, but only available in memory\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/spikeinterface/core/basesorting.py:212: UserWarning: The registered recording will not be persistent on disk, but only available in memory\n", " warnings.warn(\"The registered recording will not be persistent on disk, but only available in memory\")\n", - "/home/cb/miniconda3/envs/spy/lib/python3.9/tempfile.py:821: ResourceWarning: Implicitly cleaning up \n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/autopopulate.py:292: ResourceWarning: unclosed file <_io.TextIOWrapper name='/stelmo/nwb/recording/minirec20230622_.nwb_01_s1_first9_0_default_hippocampus/traces_cached_seg0.raw' mode='r' encoding='UTF-8'>\n", + " make(dict(key), **(make_kwargs or {}))\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/tempfile.py:821: ResourceWarning: Implicitly cleaning up \n", " _warnings.warn(warn_message, ResourceWarning)\n" ] } ], "source": [ "# [(sgs.SpikeSortingSelection & ss_key).proj()]\n", - "sgs.SpikeSorting.populate()" + "sgs.SpikeSorting.populate(ss_key)" ] }, { @@ -2370,7 +2009,7 @@ }, { "cell_type": "code", - "execution_count": 63, + "execution_count": 40, "metadata": {}, "outputs": [ { @@ -2470,8 +2109,8 @@ "mountainsort4\n", "hippocampus_tutorial\n", 
"minirec20230622_.nwb_01_s1_first9_0_default_hippocampus_none_artifact_removed_valid_times\n", - "/home/cb/wrk/zOther/data/\"sorting\"/minirec20230622_.nwb_01_s1_first9_0_default_hippocampus_3335c236_spikesorting\n", - "1689971050 \n", + "/stelmo/nwb/spikesorting/minirec20230622_.nwb_01_s1_first9_0_default_hippocampus_d318c3f1_spikesorting\n", + "1710873204 \n", " \n", " \n", "

Total: 1

\n", @@ -2480,11 +2119,11 @@ "text/plain": [ "*nwb_file_name *sort_group_id *sort_interval *preproc_param *team_name *sorter *sorter_params *artifact_remo sorting_path time_of_sort \n", "+------------+ +------------+ +------------+ +------------+ +-----------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", - "minirec2023062 0 01_s1_first9 default_hippoc My Team mountainsort4 hippocampus_tu minirec2023062 /home/cb/wrk/z 1689971050 \n", + "minirec2023062 0 01_s1_first9 default_hippoc My Team mountainsort4 hippocampus_tu minirec2023062 /stelmo/nwb/sp 1710873204 \n", " (Total: 1)" ] }, - "execution_count": 63, + "execution_count": 40, "metadata": {}, "output_type": "execute_result" } @@ -2497,10 +2136,1105 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Next Steps\n", + "## Automatic Curation\n", + "\n", + "Spikesorting algorithms can sometimes identify noise or other undesired features as spiking units.\n", + "Spyglass provides a curation pipeline to detect and label such features to exclude them\n", + "from downstream analysis.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Initial Curation\n", + "\n", + "The `Curation` table keeps track of rounds of spikesorting curations in the spikesorting v0 pipeline.\n", + "Before we begin, we first insert an initial curation entry with the spiking results." + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " Stores each spike sorting; similar to IntervalList\n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

curation_id

\n", + " a number correponding to the index of this curation\n", + "
\n", + "

nwb_file_name

\n", + " name of the NWB file\n", + "
\n", + "

sort_group_id

\n", + " identifier for a group of electrodes\n", + "
\n", + "

sort_interval_name

\n", + " name for this interval\n", + "
\n", + "

preproc_params_name

\n", + " \n", + "
\n", + "

team_name

\n", + " \n", + "
\n", + "

sorter

\n", + " \n", + "
\n", + "

sorter_params_name

\n", + " \n", + "
\n", + "

artifact_removed_interval_list_name

\n", + " \n", + "
\n", + "

parent_curation_id

\n", + " \n", + "
\n", + "

curation_labels

\n", + " a dictionary of labels for the units\n", + "
\n", + "

merge_groups

\n", + " a list of merge groups for the units\n", + "
\n", + "

quality_metrics

\n", + " a list of quality metrics for the units (if available)\n", + "
\n", + "

description

\n", + " optional description for this curated sort\n", + "
\n", + "

time_of_creation

\n", + " in Unix time, to the nearest second\n", + "
0minirec20230622_.nwb001_s1_first9default_hippocampusMy Teammountainsort4hippocampus_tutorialminirec20230622_.nwb_01_s1_first9_0_default_hippocampus_none_artifact_removed_valid_times-1=BLOB==BLOB==BLOB=1710873795
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*curation_id *nwb_file_name *sort_group_id *sort_interval *preproc_param *team_name *sorter *sorter_params *artifact_remo parent_curatio curation_l merge_grou quality_me description time_of_creati\n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +-----------+ +------------+ +------------+ +------------+ +------------+ +--------+ +--------+ +--------+ +------------+ +------------+\n", + "0 minirec2023062 0 01_s1_first9 default_hippoc My Team mountainsort4 hippocampus_tu minirec2023062 -1 =BLOB= =BLOB= =BLOB= 1710873795 \n", + " (Total: 1)" + ] + }, + "execution_count": 42, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "for sorting_key in (sgs.SpikeSorting() & ss_key).fetch(\"KEY\"):\n", + " # insert_curation will make an entry with a new curation_id regardless of whether it already exists\n", + " # to avoid this, we check if the curation already exists\n", + " if not (sgs.Curation() & sorting_key):\n", + " sgs.Curation.insert_curation(sorting_key)\n", + "\n", + "sgs.Curation() & ss_key" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Waveform Extraction\n", + "\n", + "Some metrics used for curating units are dependent on features of the spike waveform.\n", + "We extract these for each unit's initial curation here" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'waveform_params_name': 'default_whitened',\n", + " 'waveform_params': {'ms_before': 0.5,\n", + " 'ms_after': 0.5,\n", + " 'max_spikes_per_unit': 5000,\n", + " 'n_jobs': 5,\n", + " 'total_memory': '5G',\n", + " 'whiten': True}}" + ] + }, + "execution_count": 45, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Parameters used for waveform extraction from the recording\n", + "waveform_params_name = \"default_whitened\"\n", + "(\n", + " sgs.WaveformParameters() & {\"waveform_params_name\": waveform_params_name}\n", + ").fetch(as_dict=True)[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[11:48:56][INFO] Spyglass: Extracting waveforms...\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "2f908ec0ce9d47bc8c35d448d3fb2e79", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "extract waveforms memmap: 0%| | 0/1 [00:00\n", + " self.pid = os.fork()\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/multiprocessing/popen_fork.py:66: ResourceWarning: Unclosed socket \n", + " self.pid = os.fork()\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/multiprocessing/popen_fork.py:66: ResourceWarning: Unclosed socket \n", + " self.pid = os.fork()\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n", + "[11:48:57][INFO] Spyglass: Writing new NWB file minirec20230622_4ZZBN5G9DY.nwb\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/hdmf/build/objectmapper.py:260: DtypeConversionWarning: Spec 'Units/spike_times': Value with data type int64 is being converted to data type float64 as specified.\n", + " warnings.warn(full_warning_msg, DtypeConversionWarning)\n", + 
"/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/hash.py:39: ResourceWarning: unclosed file <_io.BufferedReader name='/stelmo/nwb/analysis/minirec20230622/minirec20230622_4ZZBN5G9DY.nwb'>\n", + " return uuid_from_stream(Path(filepath).open(\"rb\"), init_string=init_string)\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/external.py:276: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n", + " if check_hash:\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/autopopulate.py:292: ResourceWarning: unclosed file <_io.TextIOWrapper name='/stelmo/nwb/recording/minirec20230622_.nwb_01_s1_first9_0_default_hippocampus/traces_cached_seg0.raw' mode='r' encoding='UTF-8'>\n", + " make(dict(key), **(make_kwargs or {}))\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n" + ] + } + ], + "source": [ + "# extract waveforms\n", + "curation_keys = [\n", + " {**k, \"waveform_params_name\": waveform_params_name}\n", + " for k in (sgs.Curation() & ss_key & {\"curation_id\": 0}).fetch(\"KEY\")\n", + "]\n", + "sgs.WaveformSelection.insert(curation_keys, skip_duplicates=True)\n", + "sgs.Waveforms.populate(ss_key)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Quality Metrics\n", + "\n", + "With these waveforms, we can calculate the metrics used to determine the quality of each unit." + ] + }, + { + "cell_type": "code", + "execution_count": 58, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'snr': {'peak_sign': 'neg',\n", + " 'random_chunk_kwargs_dict': {'num_chunks_per_segment': 20,\n", + " 'chunk_size': 10000,\n", + " 'seed': 0}},\n", + " 'isi_violation': {'isi_threshold_ms': 1.5, 'min_isi_ms': 0.0},\n", + " 'nn_isolation': {'max_spikes': 1000,\n", + " 'min_spikes': 10,\n", + " 'n_neighbors': 5,\n", + " 'n_components': 7,\n", + " 'radius_um': 100,\n", + " 'seed': 0},\n", + " 'nn_noise_overlap': {'max_spikes': 1000,\n", + " 'min_spikes': 10,\n", + " 'n_neighbors': 5,\n", + " 'n_components': 7,\n", + " 'radius_um': 100,\n", + " 'seed': 0},\n", + " 'peak_channel': {'peak_sign': 'neg'},\n", + " 'num_spikes': {}}" + ] + }, + "execution_count": 58, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# parameters which define what quality metrics are calculated and how\n", + "metric_params_name = \"franklab_default3\"\n", + "(sgs.MetricParameters() & {\"metric_params_name\": metric_params_name}).fetch(\n", + " \"metric_params\"\n", + ")[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 59, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/spikeinterface/postprocessing/template_tools.py:23: DeprecationWarning: The spikeinterface.postprocessing.template_tools is submodule is deprecated.Use spikeinterface.core.template_tools instead\n", + " _warn()\n", + "[12:17:37][INFO] Spyglass: Computed all metrics: {'snr': {1: 3.627503, 2: 3.598743, 3: 3.6419973}, 'isi_violation': {'1': 0.03896103896103896, '2': 0.036065573770491806, '3': 0.03488372093023256}, 'nn_isolation': {'1': 0.9591503267973855, '2': 0.9594771241830065, '3': 0.9872549019607844}, 
'nn_noise_overlap': {'1': 0.49642857142857144, '2': 0.44738562091503264, '3': 0.4}, 'peak_channel': {1: 1, 2: 2, 3: 3}, 'num_spikes': {'1': 309, '2': 306, '3': 431}}\n", + "[12:17:37][INFO] Spyglass: Writing new NWB file minirec20230622_L3O536PHYB.nwb\n", + "[12:17:38][INFO] Spyglass: Adding metric snr : {1: 3.627503, 2: 3.598743, 3: 3.6419973}\n", + "[12:17:38][INFO] Spyglass: Adding metric isi_violation : {'1': 0.03896103896103896, '2': 0.036065573770491806, '3': 0.03488372093023256}\n", + "[12:17:38][INFO] Spyglass: Adding metric nn_isolation : {'1': 0.9591503267973855, '2': 0.9594771241830065, '3': 0.9872549019607844}\n", + "[12:17:38][INFO] Spyglass: Adding metric nn_noise_overlap : {'1': 0.49642857142857144, '2': 0.44738562091503264, '3': 0.4}\n", + "[12:17:38][INFO] Spyglass: Adding metric peak_channel : {1: 1, 2: 2, 3: 3}\n", + "[12:17:38][INFO] Spyglass: Adding metric num_spikes : {'1': 309, '2': 306, '3': 431}\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/hash.py:39: ResourceWarning: unclosed file <_io.BufferedReader name='/stelmo/nwb/analysis/minirec20230622/minirec20230622_L3O536PHYB.nwb'>\n", + " return uuid_from_stream(Path(filepath).open(\"rb\"), init_string=init_string)\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/external.py:276: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n", + " if check_hash:\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/autopopulate.py:292: ResourceWarning: unclosed file <_io.TextIOWrapper name='/stelmo/nwb/recording/minirec20230622_.nwb_01_s1_first9_0_default_hippocampus/traces_cached_seg0.raw' mode='r' encoding='UTF-8'>\n", + " make(dict(key), **(make_kwargs or {}))\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

curation_id

\n", + " a number correponding to the index of this curation\n", + "
\n", + "

nwb_file_name

\n", + " name of the NWB file\n", + "
\n", + "

sort_group_id

\n", + " identifier for a group of electrodes\n", + "
\n", + "

sort_interval_name

\n", + " name for this interval\n", + "
\n", + "

preproc_params_name

\n", + " \n", + "
\n", + "

team_name

\n", + " \n", + "
\n", + "

sorter

\n", + " \n", + "
\n", + "

sorter_params_name

\n", + " \n", + "
\n", + "

artifact_removed_interval_list_name

\n", + " \n", + "
\n", + "

waveform_params_name

\n", + " name of waveform extraction parameters\n", + "
\n", + "

metric_params_name

\n", + " \n", + "
\n", + "

quality_metrics_path

\n", + " \n", + "
\n", + "

analysis_file_name

\n", + " name of the file\n", + "
\n", + "

object_id

\n", + " Object ID for the metrics in NWB file\n", + "
0minirec20230622_.nwb001_s1_first9default_hippocampusMy Teammountainsort4hippocampus_tutorialminirec20230622_.nwb_01_s1_first9_0_default_hippocampus_none_artifact_removed_valid_timesdefault_whitenedfranklab_default3/stelmo/nwb/waveforms/minirec20230622_.nwb_0105557c_0_default_whitened_waveforms_qm.jsonminirec20230622_L3O536PHYB.nwb4b1512bc-861f-4710-8fff-55aad7fbb6ba
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*curation_id *nwb_file_name *sort_group_id *sort_interval *preproc_param *team_name *sorter *sorter_params *artifact_remo *waveform_para *metric_params quality_metric analysis_file_ object_id \n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +-----------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", + "0 minirec2023062 0 01_s1_first9 default_hippoc My Team mountainsort4 hippocampus_tu minirec2023062 default_whiten franklab_defau /stelmo/nwb/wa minirec2023062 4b1512bc-861f-\n", + " (Total: 1)" + ] + }, + "execution_count": 59, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "waveform_keys = [\n", + " {**k, \"metric_params_name\": metric_params_name}\n", + " for k in (sgs.Waveforms() & ss_key).fetch(\"KEY\")\n", + "]\n", + "sgs.MetricSelection.insert(waveform_keys, skip_duplicates=True)\n", + "sgs.QualityMetrics().populate(ss_key)\n", + "sgs.QualityMetrics() & ss_key" + ] + }, + { + "cell_type": "code", + "execution_count": 64, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/hash.py:39: ResourceWarning: unclosed file <_io.BufferedReader name='/stelmo/nwb/analysis/minirec20230622/minirec20230622_L3O536PHYB.nwb'>\n", + " return uuid_from_stream(Path(filepath).open(\"rb\"), init_string=init_string)\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
snrisi_violationnn_isolationnn_noise_overlappeak_channelnum_spikes
id
13.6275030.0389610.9591500.4964291309
23.5987430.0360660.9594770.4473862306
33.6419970.0348840.9872550.4000003431
\n", + "
" + ], + "text/plain": [ + " snr isi_violation nn_isolation nn_noise_overlap peak_channel \\\n", + "id \n", + "1 3.627503 0.038961 0.959150 0.496429 1 \n", + "2 3.598743 0.036066 0.959477 0.447386 2 \n", + "3 3.641997 0.034884 0.987255 0.400000 3 \n", + "\n", + " num_spikes \n", + "id \n", + "1 309 \n", + "2 306 \n", + "3 431 " + ] + }, + "execution_count": 64, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Look at the quality metrics for the first curation\n", + "(sgs.QualityMetrics() & ss_key).fetch_nwb()[0][\"object_id\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Automatic Curation Labeling\n", + "\n", + "With these metrics, we can assign labels to the sorted units using the `AutomaticCuration` table" + ] + }, + { + "cell_type": "code", + "execution_count": 67, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'auto_curation_params_name': 'default',\n", + " 'merge_params': {},\n", + " 'label_params': {'nn_noise_overlap': ['>', 0.1, ['noise', 'reject']]}}" + ] + }, + "execution_count": 67, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# We can select our criteria for unit labeling here\n", + "auto_curation_params_name = \"default\"\n", + "(\n", + " sgs.AutomaticCurationParameters()\n", + " & {\"auto_curation_params_name\": auto_curation_params_name}\n", + ").fetch1()" + ] + }, + { + "cell_type": "code", + "execution_count": 72, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " Stores each spike sorting; similar to IntervalList\n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

curation_id

\n", + " a number correponding to the index of this curation\n", + "
\n", + "

nwb_file_name

\n", + " name of the NWB file\n", + "
\n", + "

sort_group_id

\n", + " identifier for a group of electrodes\n", + "
\n", + "

sort_interval_name

\n", + " name for this interval\n", + "
\n", + "

preproc_params_name

\n", + " \n", + "
\n", + "

team_name

\n", + " \n", + "
\n", + "

sorter

\n", + " \n", + "
\n", + "

sorter_params_name

\n", + " \n", + "
\n", + "

artifact_removed_interval_list_name

\n", + " \n", + "
\n", + "

parent_curation_id

\n", + " \n", + "
\n", + "

curation_labels

\n", + " a dictionary of labels for the units\n", + "
\n", + "

merge_groups

\n", + " a list of merge groups for the units\n", + "
\n", + "

quality_metrics

\n", + " a list of quality metrics for the units (if available)\n", + "
\n", + "

description

\n", + " optional description for this curated sort\n", + "
\n", + "

time_of_creation

\n", + " in Unix time, to the nearest second\n", + "
0minirec20230622_.nwb001_s1_first9default_hippocampusMy Teammountainsort4hippocampus_tutorialminirec20230622_.nwb_01_s1_first9_0_default_hippocampus_none_artifact_removed_valid_times-1=BLOB==BLOB==BLOB=1710873795
1minirec20230622_.nwb001_s1_first9default_hippocampusMy Teammountainsort4hippocampus_tutorialminirec20230622_.nwb_01_s1_first9_0_default_hippocampus_none_artifact_removed_valid_times0=BLOB==BLOB==BLOB=auto curated1710876397
\n", + " \n", + "

Total: 2

\n", + " " + ], + "text/plain": [ + "*curation_id *nwb_file_name *sort_group_id *sort_interval *preproc_param *team_name *sorter *sorter_params *artifact_remo parent_curatio curation_l merge_grou quality_me description time_of_creati\n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +-----------+ +------------+ +------------+ +------------+ +------------+ +--------+ +--------+ +--------+ +------------+ +------------+\n", + "0 minirec2023062 0 01_s1_first9 default_hippoc My Team mountainsort4 hippocampus_tu minirec2023062 -1 =BLOB= =BLOB= =BLOB= 1710873795 \n", + "1 minirec2023062 0 01_s1_first9 default_hippoc My Team mountainsort4 hippocampus_tu minirec2023062 0 =BLOB= =BLOB= =BLOB= auto curated 1710876397 \n", + " (Total: 2)" + ] + }, + "execution_count": 72, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# We can now apply the automatic curation criteria to the quality metrics\n", + "metric_keys = [\n", + " {**k, \"auto_curation_params_name\": auto_curation_params_name}\n", + " for k in (sgs.QualityMetrics() & ss_key).fetch(\"KEY\")\n", + "]\n", + "sgs.AutomaticCurationSelection.insert(metric_keys, skip_duplicates=True)\n", + "# populating this table will make a new entry in the curation table\n", + "sgs.AutomaticCuration().populate(ss_key)\n", + "sgs.Curation() & ss_key" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Insert desired curation into downstream and merge tables for future analysis\n", + "\n", + "Now that we've performed auto-curation, we can insert the results of our chosen curation into \n", + "`CuratedSpikeSorting` (the final table of this pipeline), and the merge table `SpikeSortingOutput`.\n", + "Downstream analyses such as decoding will access the spiking data from there" + ] + }, + { + "cell_type": "code", + "execution_count": 82, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

merge_id

\n", + " \n", + "
\n", + "

curation_id

\n", + " a number correponding to the index of this curation\n", + "
\n", + "

nwb_file_name

\n", + " name of the NWB file\n", + "
\n", + "

sort_group_id

\n", + " identifier for a group of electrodes\n", + "
\n", + "

sort_interval_name

\n", + " name for this interval\n", + "
\n", + "

preproc_params_name

\n", + " \n", + "
\n", + "

team_name

\n", + " \n", + "
\n", + "

sorter

\n", + " \n", + "
\n", + "

sorter_params_name

\n", + " \n", + "
\n", + "

artifact_removed_interval_list_name

\n", + " \n", + "
662f3e35-c81e-546c-69c3-b3a2f5ed27761minirec20230622_.nwb001_s1_first9default_hippocampusMy Teammountainsort4hippocampus_tutorialminirec20230622_.nwb_01_s1_first9_0_default_hippocampus_none_artifact_removed_valid_times
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*merge_id curation_id nwb_file_name sort_group_id sort_interval_ preproc_params team_name sorter sorter_params_ artifact_remov\n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +-----------+ +------------+ +------------+ +------------+\n", + "662f3e35-c81e- 1 minirec2023062 0 01_s1_first9 default_hippoc My Team mountainsort4 hippocampus_tu minirec2023062\n", + " (Total: 1)" + ] + }, + "execution_count": 82, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# get the curation keys corresponding to the automatic curation\n", + "auto_curation_key_list = (sgs.AutomaticCuration() & ss_key).fetch(\n", + " \"auto_curation_key\"\n", + ")\n", + "\n", + "# insert into CuratedSpikeSorting\n", + "for auto_key in auto_curation_key_list:\n", + " # get the full key information needed\n", + " curation_auto_key = (sgs.Curation() & auto_key).fetch1(\"KEY\")\n", + " sgs.CuratedSpikeSortingSelection.insert1(\n", + " curation_auto_key, skip_duplicates=True\n", + " )\n", + "sgs.CuratedSpikeSorting.populate(ss_key)\n", + "\n", + "# Add the curated spike sorting to the SpikeSortingOutput merge table\n", + "keys_for_merge_tables = (\n", + " sgs.CuratedSpikeSorting & auto_curation_key_list\n", + ").fetch(\"KEY\")\n", + "SpikeSortingOutput.insert(\n", + " keys_for_merge_tables,\n", + " skip_duplicates=True,\n", + " part_name=\"CuratedSpikeSorting\",\n", + ")\n", + "# Here's our result!\n", + "SpikeSortingOutput.CuratedSpikeSorting() & ss_key" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Manual Curation with figurl" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As of June 2021, members of the Frank Lab can use the `sortingview` web app for\n", + "manual curation. To make use of this, we need to populate the `CurationFigurl` table.\n", + "\n", + "We begin by selecting a starting point from the curation entries. 
In this case we will use\n", + "the AutomaticCuration populated above as a starting point for manual curation, though you could also \n", + "start from the opriginal curation entry by selecting the proper key from the `Curation` table\n", + "\n", + "_Note_: This step requires setting up your kachery sharing through the [sharing notebook](02_Data_Sync.ipynb)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 107, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[16:04:49][INFO] Spyglass: Preparing spikesortingview data\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Initial pass: segment 0\n", + "Segment 0 of 1\n", + "/stelmo/nwb/kachery-cloud/sha1/de/41/7a/de417af585eda5dc274c1389ad1b28ef1a0580ab\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/autopopulate.py:292: ResourceWarning: unclosed file <_io.TextIOWrapper name='/stelmo/nwb/recording/minirec20230622_.nwb_01_s1_first9_0_default_hippocampus/traces_cached_seg0.raw' mode='r' encoding='UTF-8'>\n", + " make(dict(key), **(make_kwargs or {}))\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n" + ] + } + ], + "source": [ + "starting_curations = (sgs.AutomaticCuration() & ss_key).fetch(\n", + " \"auto_curation_key\"\n", + ") # you could also select any key from the sgs.Curation table here\n", + "\n", + "username = \"username\"\n", + "fig_url_repo = f\"gh://LorenFrankLab/sorting-curations/main/{username}/\" # settings for franklab members\n", + "\n", + "sort_interval_name = interval_list_name\n", + "gh_url = (\n", + " fig_url_repo\n", + " + str(nwb_file_name + \"_\" + sort_interval_name) # session id\n", + " + \"/{}\" # tetrode using auto_id['sort_group_id']\n", + " + \"/curation.json\"\n", + ") # url where the curation is stored\n", + "\n", + "for auto_id in starting_curations:\n", + " auto_curation_out_key = dict(\n", + " **(sgs.Curation() & auto_id).fetch1(\"KEY\"),\n", + " new_curation_uri=gh_url.format(str(auto_id[\"sort_group_id\"])),\n", + " )\n", + " sgs.CurationFigurlSelection.insert1(\n", + " auto_curation_out_key, skip_duplicates=True\n", + " )\n", + " sgs.CurationFigurl.populate(auto_curation_out_key)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can then access the url for the curation figurl like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 108, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "https://figurl.org/f?v=npm://@fi-sci/figurl-sortingview@12/dist&d=sha1://b0d9355ba302bcbcb7005822796fc850c06b6d3d&s={\"initialSortingCuration\":\"sha1://2800ea072728fd141d8e5bc88525ac0c6c137d04\",\"sortingCuration\":\"gh://LorenFrankLab/sorting-curations/main/sambray/minirec20230622_.nwb_01_s1/0/curation.json\"}&label=minirec20230622_.nwb_01_s1_first9_0_default_hippocampus%20minirec20230622_.nwb_01_s1_first9_0_default_hippocampus_42be1215_spikesorting&zone=franklab.collaborators\n" + ] + } + ], + "source": [ + "print((sgs.CurationFigurl & ss_key).fetch(\"url\")[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This will take you to a workspace on the `sortingview` app. The workspace, which\n", + "you can think of as a list of recording and associated sorting objects, was\n", + "created at the end of spike sorting. 
On the workspace view, you will see a set\n", + "of recordings that have been added to the workspace.\n", + "\n", + "![Workspace view](./../notebook-images/workspace.png)\n", + "\n", + "Clicking on a recording then takes you to a page that gives you information\n", + "about the recording as well as the associated sorting objects.\n", + "\n", + "![Recording view](./../notebook-images/recording.png)\n", + "\n", + "Click on a sorting to see the curation view. Try exploring the many\n", + "visualization widgets.\n", + "\n", + "![Unit table](./../notebook-images/unittable.png)\n", + "\n", + "The most important is the `Units Table` and the `Curation` menu, which allows\n", + "you to give labels to the units. The curation labels will persist even if you\n", + "suddenly lose connection to the app; this is because the curation actions are\n", + "appended to the workspace as soon as they are created. Note that if you are not\n", + "logged in with your Google account, `Curation` menu may not be visible. Log in\n", + "and refresh the page to access this feature.\n", "\n", - "Congratulations, you've spike sorted! See our\n", - "[next notebook](./03_Curation.ipynb) for curation steps.\n" + "![Curation](./../notebook-images/curation.png)\n" ] } ], @@ -2520,7 +3254,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.18" + "version": "3.9.16" }, "vscode": { "interpreter": { diff --git a/notebooks/32_Ripple_Detection.ipynb b/notebooks/32_Ripple_Detection.ipynb index 01c833cac..b9a37e279 100644 --- a/notebooks/32_Ripple_Detection.ipynb +++ b/notebooks/32_Ripple_Detection.ipynb @@ -48,24 +48,15 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 32, "id": "fc5e9860", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[2024-01-24 09:56:01,323][INFO]: Connecting sambray@lmf-db.cin.ucsf.edu:3306\n", - "[2024-01-24 09:56:01,355][INFO]: Connected sambray@lmf-db.cin.ucsf.edu:3306\n", - "[09:56:02][WARNING] Spyglass: Please update position_tools to >= 0.1.0\n" - ] - } - ], + "outputs": [], "source": [ "import os\n", "import datajoint as dj\n", "import numpy as np\n", + "import pandas as pd\n", "\n", "# change to the upper level folder to detect dj_local_conf.json\n", "if os.path.basename(os.getcwd()) == \"notebooks\":\n", @@ -73,11 +64,10 @@ "dj.config.load(\"dj_local_conf.json\") # load config for database connection info\n", "\n", "import spyglass.common as sgc\n", - "import spyglass.position as sgp\n", - "import spyglass.lfp as lfp\n", + "import spyglass.position.v1 as sgp\n", "import spyglass.lfp.analysis.v1 as lfp_analysis\n", "from spyglass.lfp import LFPOutput\n", - "import spyglass.lfp.v1 as sglfp\n", + "import spyglass.lfp as sglfp\n", "from spyglass.position import PositionOutput\n", "import spyglass.ripple.v1 as sgrip\n", "import spyglass.ripple.v1 as sgr\n", @@ -90,209 +80,1739 @@ ] }, { - "attachments": {}, "cell_type": "markdown", - "id": "d741727d-6609-465a-add8-b5c4c9ab844c", - "metadata": { - "tags": [] - }, + "id": "154d132d", + "metadata": {}, "source": [ - "## Selecting Electrodes\n" + "## Generate LFP Ripple Band" ] }, { "cell_type": "markdown", - "id": "c571fe1a", + "id": "22384148", "metadata": {}, "source": [ - "First, we'll pick the electrodes on which we'll run ripple detection on, using\n", - "`RippleLFPSelection.set_lfp_electrodes`\n" + "First, we need to generate a filter band from the LFP data at the ripple frequency. 
This process is analogous to that in [30_LFP.ipynb](31_Theta.ipynb). " ] }, { - "cell_type": "code", - "execution_count": 2, - "id": "902494c3-37ec-4550-b14c-4b17df7d0ec7", + "cell_type": "markdown", + "id": "3576ddd1", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[0;31mSignature:\u001b[0m\n", - "\u001b[0msgr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mRippleLFPSelection\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_lfp_electrodes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0melectrode_list\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0mgroup_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'CA1'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", - "\u001b[0;34m\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mDocstring:\u001b[0m\n", - "Removes all electrodes for the specified nwb file and then\n", - "adds back the electrodes in the list\n", - "\n", - "Parameters\n", - "----------\n", - "key : dict\n", - " dictionary corresponding to the LFPBand entry to use for\n", - " ripple detection\n", - "electrode_list : list\n", - " list of electrodes from LFPBandSelection.LFPBandElectrode\n", - " to be used as the ripple LFP during detection\n", - "group_name : str, optional\n", - " description of the electrode group, by default \"CA1\"\n", - "\u001b[0;31mFile:\u001b[0m ~/Documents/spyglass/src/spyglass/ripple/v1/ripple.py\n", - "\u001b[0;31mType:\u001b[0m function" - ] - } - ], "source": [ - "?sgr.RippleLFPSelection.set_lfp_electrodes" + "#### Make LFP" ] }, { - "attachments": {}, "cell_type": "markdown", - "id": "355190ce-a553-44d6-8260-7eda67f9407f", + "id": "011ab00c", "metadata": {}, "source": [ - "We'll need the `nwb_file_name`, an `electrode_list`, and to a `group_name`.\n", - "\n", - "- By default, `group_name` is set to CA1 for ripple detection, but we could\n", - " alternatively use PFC.\n", - "- We use `nwb_file_name` to explore which electrodes are available for the\n", - " `electrode_list`.\n" + "If you have already populated the LFP table for your data you may skip this step. Here, we will begin by creating a lfp group of just electrodes in the hippocampus region, and populate the lfp on a subsetted interval:" ] }, { "cell_type": "code", - "execution_count": 3, - "id": "5e29339b-3d2b-4fb4-a966-6166c6a40ab8", + "execution_count": 9, + "id": "ead482d5", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

nwb_file_name

\n", + " name of the NWB file\n", + "
\n", + "

lfp_electrode_group_name

\n", + " the name of this group of electrodes\n", + "
\n", + "

target_interval_list_name

\n", + " descriptive name of this interval list\n", + "
\n", + "

filter_name

\n", + " descriptive name of this filter\n", + "
\n", + "

filter_sampling_rate

\n", + " sampling rate for this filter\n", + "
\n", + "

analysis_file_name

\n", + " name of the file\n", + "
\n", + "

interval_list_name

\n", + " descriptive name of this interval list\n", + "
\n", + "

lfp_object_id

\n", + " the NWB object ID for loading this object from the file\n", + "
\n", + "

lfp_sampling_rate

\n", + " the sampling rate, in HZ\n", + "
mediumnwb20230802_.nwbtest_hippocampus02_r1_ripple_demoLFP 0-400 Hz30000mediumnwb20230802_7625J294O4.nwblfp_test_hippocampus_02_r1_ripple_demo_valid times95ac8100-eca8-4dff-a504-b1c139a2a3af1000.0
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*nwb_file_name *lfp_electrode *target_interv *filter_name *filter_sampli analysis_file_ interval_list_ lfp_object_id lfp_sampling_r\n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", + "mediumnwb20230 test_hippocamp 02_r1_ripple_d LFP 0-400 Hz 30000 mediumnwb20230 lfp_test_hippo 95ac8100-eca8- 1000.0 \n", + " (Total: 1)" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "nwb_file_name = \"tonks20211103_.nwb\"\n", - "interval_list_name = \"test interval\"\n", - "filter_name = \"Ripple 150-250 Hz\"\n", - "if not sgc.Session & {\"nwb_file_name\": nwb_file_name}:\n", - " # This error will be raised when notebooks auto-run with 'minirec'\n", - " raise ValueError(f\"Session with nwb_file_name={nwb_file_name} not found\")" + "nwb_file_name = \"mediumnwb20230802_.nwb\"\n", + "lfp_electrode_group_name = \"test_hippocampus\"\n", + "interval_list_name = \"02_r1_ripple_demo\"\n", + "\n", + "# select hippocampus electrodes\n", + "electrodes_df = (\n", + " pd.DataFrame(\n", + " (\n", + " sgc.Electrode\n", + " & {\n", + " \"nwb_file_name\": nwb_file_name,\n", + " }\n", + " )\n", + " * (sgc.BrainRegion & {\"region_name\": \"hippocampus\"})\n", + " )\n", + " .loc[:, [\"nwb_file_name\", \"electrode_id\", \"region_name\"]]\n", + " .sort_values(by=\"electrode_id\")\n", + ")\n", + "# create lfp_electrode_group\n", + "lfp_eg_key = {\n", + " \"nwb_file_name\": nwb_file_name,\n", + " \"lfp_electrode_group_name\": lfp_electrode_group_name,\n", + "}\n", + "sglfp.lfp_electrode.LFPElectrodeGroup.create_lfp_electrode_group(\n", + " nwb_file_name=nwb_file_name,\n", + " group_name=lfp_electrode_group_name,\n", + " electrode_list=electrodes_df.electrode_id.tolist(),\n", + ")\n", + "\n", + "# make a shorter interval to run this demo on\n", + "interval_start = (\n", + " sgc.IntervalList\n", + " & {\"nwb_file_name\": nwb_file_name, \"interval_list_name\": \"02_r1\"}\n", + ").fetch1(\"valid_times\")[0][0]\n", + "truncated_interval = np.array(\n", + " [[interval_start, interval_start + 120]]\n", + ") # first 2 minutes of epoch\n", + "sgc.IntervalList.insert1(\n", + " {\n", + " \"nwb_file_name\": nwb_file_name,\n", + " \"interval_list_name\": \"02_r1_ripple_demo\",\n", + " \"valid_times\": truncated_interval,\n", + " },\n", + " skip_duplicates=True,\n", + ")\n", + "\n", + "# make the lfp selection\n", + "lfp_s_key = lfp_eg_key.copy()\n", + "lfp_s_key.update(\n", + " {\n", + " \"target_interval_list_name\": interval_list_name,\n", + " \"filter_name\": \"LFP 0-400 Hz\",\n", + " \"filter_sampling_rate\": 30_000, # sampling rate of the data (Hz)\n", + " \"target_sampling_rate\": 1_000, # smpling rate of the lfp output (Hz)\n", + " }\n", + ")\n", + "sglfp.v1.LFPSelection.insert1(lfp_s_key, skip_duplicates=True)\n", + "\n", + "# populate the lfp\n", + "sglfp.v1.LFPV1.populate(lfp_s_key, display_progress=True)\n", + "sglfp.v1.LFPV1 & lfp_s_key" ] }, { - "attachments": {}, "cell_type": "markdown", - "id": "ac9c694f-06dd-47e8-9379-ec37397758f8", + "id": "29f67591", "metadata": {}, "source": [ - "Now we can look at `electrode_id` in the `Electrode` table:\n" + "#### Populate Ripple Band\n", + "We now create a filter for this frequency band" ] }, { "cell_type": "code", "execution_count": 10, - "id": "7c9f301d-7199-4c20-b008-38a1c16f1d04", + "id": "23824836", "metadata": {}, "outputs": [ { "data": { "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " /* Show the tooltip text when you mouse over the tooltip container */\n", + " .djtooltip:hover .djtooltiptext {\n", + " visibility: visible;\n", + " }\n", + " \n", + " \n", + " \n", + "
\n", + "
probe_idprobe_shankprobe_electrodenameoriginal_reference_electrodexyzfilteringimpedancebad_channelx_warpedy_warpedz_warpedcontactsregion_name
\n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

filter_name

\n", + " descriptive name of this filter\n", + "
\n", + "

filter_sampling_rate

\n", + " sampling rate for this filter\n", + "
\n", + "

filter_type

\n", + " \n", + "
\n", + "

filter_low_stop

\n", + " lowest frequency for stop band for low frequency side of filter\n", + "
\n", + "

filter_low_pass

\n", + " lowest frequency for pass band of low frequency side of filter\n", + "
\n", + "

filter_high_pass

\n", + " highest frequency for pass band for high frequency side of filter\n", + "
\n", + "

filter_high_stop

\n", + " highest frequency for stop band of high frequency side of filter\n", + "
\n", + "

filter_comments

\n", + " comments about the filter\n", + "
\n", + "

filter_band_edges

\n", + " numpy array containing the filter bands (redundant with individual parameters)\n", + "
\n", + "

filter_coeff

\n", + " numpy array containing the filter coefficients\n", + "
Ripple 150-250 Hz1000lowpass140.0150.0250.0260.0ripple band filter for 1 kHz data=BLOB==BLOB=
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*filter_name *filter_sampli filter_type filter_low_sto filter_low_pas filter_high_pa filter_high_st filter_comment filter_ban filter_coe\n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +--------+ +--------+\n", + "Ripple 150-250 1000 lowpass 140.0 150.0 250.0 260.0 ripple band fi =BLOB= =BLOB= \n", + " (Total: 1)" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sgc.FirFilterParameters().add_filter(\n", + " filter_name=\"Ripple 150-250 Hz\",\n", + " fs=1000.0,\n", + " filter_type=\"bandpass\",\n", + " band_edges=[140, 150, 250, 260],\n", + " comments=\"ripple band filter for 1 kHz data\",\n", + ")\n", + "\n", + "sgc.FirFilterParameters() & \"filter_name='Ripple 150-250 Hz'\"" + ] + }, + { + "cell_type": "markdown", + "id": "494a270c", + "metadata": {}, + "source": [ + "We can then populate the ripple band" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "080a5fec", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

lfp_merge_id

\n", + " \n", + "
\n", + "

filter_name

\n", + " descriptive name of this filter\n", + "
\n", + "

filter_sampling_rate

\n", + " sampling rate for this filter\n", + "
\n", + "

nwb_file_name

\n", + " name of the NWB file\n", + "
\n", + "

target_interval_list_name

\n", + " descriptive name of this interval list\n", + "
\n", + "

lfp_band_sampling_rate

\n", + " the sampling rate for this band\n", + "
\n", + "

analysis_file_name

\n", + " name of the file\n", + "
\n", + "

interval_list_name

\n", + " descriptive name of this interval list\n", + "
\n", + "

lfp_band_object_id

\n", + " the NWB object ID for loading this object from the file\n", + "
e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz1000mediumnwb20230802_.nwb02_r1_ripple_demo1000mediumnwb20230802_XVQHZZVY3B.nwb02_r1_ripple_demo lfp band 1000Hzeca68871-9a8d-4fa4-b265-95dafdfdda29
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*lfp_merge_id *filter_name *filter_sampli *nwb_file_name *target_interv *lfp_band_samp analysis_file_ interval_list_ lfp_band_objec\n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", + "e5c8a41b-5bb1- Ripple 150-250 1000 mediumnwb20230 02_r1_ripple_d 1000 mediumnwb20230 02_r1_ripple_d eca68871-9a8d-\n", + " (Total: 1)" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from spyglass.lfp.analysis.v1 import lfp_band\n", + "\n", + "filter_name = \"Ripple 150-250 Hz\"\n", + "lfp_band_electrode_ids = (\n", + " electrodes_df.electrode_id.tolist()\n", + ") # assumes we've filtered these electrodes\n", + "lfp_band_sampling_rate = 1000 # desired sampling rate\n", + "\n", + "lfp_merge_id = (LFPOutput.LFPV1() & lfp_s_key).fetch1(\"merge_id\")\n", + "lfp_band.LFPBandSelection().set_lfp_band_electrodes(\n", + " nwb_file_name=nwb_file_name,\n", + " lfp_merge_id=lfp_merge_id,\n", + " electrode_list=lfp_band_electrode_ids,\n", + " filter_name=filter_name,\n", + " interval_list_name=interval_list_name,\n", + " reference_electrode_list=[-1], # -1 means no ref electrode for all channels\n", + " lfp_band_sampling_rate=lfp_band_sampling_rate,\n", + ")\n", + "\n", + "lfp_band.LFPBandV1.populate(\n", + " {\"lfp_merge_id\": lfp_merge_id, \"filter_name\": filter_name},\n", + " display_progress=True,\n", + ")\n", + "lfp_band.LFPBandV1 & {\"lfp_merge_id\": lfp_merge_id, \"filter_name\": filter_name}" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "d741727d-6609-465a-add8-b5c4c9ab844c", + "metadata": { + "tags": [] + }, + "source": [ + "## Selecting Ripple Analysis Electrodes\n" + ] + }, + { + "cell_type": "markdown", + "id": "c571fe1a", + "metadata": {}, + "source": [ + "Next, we'll pick the electrodes on which we'll run ripple detection on, using\n", + "`RippleLFPSelection.set_lfp_electrodes`\n" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "902494c3-37ec-4550-b14c-4b17df7d0ec7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[0;31mSignature:\u001b[0m\n", + "\u001b[0msgr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mRippleLFPSelection\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_lfp_electrodes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0melectrode_list\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0mgroup_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'CA1'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mDocstring:\u001b[0m\n", + "Removes all electrodes for the specified nwb file and then\n", + "adds back the electrodes in the list\n", + "\n", + "Parameters\n", + "----------\n", + "key : dict\n", + " dictionary corresponding to the LFPBand entry to use for\n", + " ripple detection\n", + "electrode_list : list\n", + " list of electrodes from LFPBandSelection.LFPBandElectrode\n", + " to be used as the ripple LFP during detection\n", + "group_name : str, optional\n", + " 
description of the electrode group, by default \"CA1\"\n", + "\u001b[0;31mFile:\u001b[0m ~/Documents/spyglass/src/spyglass/ripple/v1/ripple.py\n", + "\u001b[0;31mType:\u001b[0m function" + ] + } + ], + "source": [ + "?sgr.RippleLFPSelection.set_lfp_electrodes" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "355190ce-a553-44d6-8260-7eda67f9407f", + "metadata": {}, + "source": [ + "We'll need the `nwb_file_name`, an `electrode_list`, and to a `group_name`.\n", + "\n", + "- By default, `group_name` is set to CA1 for ripple detection, but we could\n", + " alternatively use PFC.\n", + "- We use `nwb_file_name` to explore which electrodes are available for the\n", + " `electrode_list`.\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "ac9c694f-06dd-47e8-9379-ec37397758f8", + "metadata": {}, + "source": [ + "Now we can look at `electrode_id` in the `Electrode` table:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "7c9f301d-7199-4c20-b008-38a1c16f1d04", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
probe_idprobe_shankprobe_electrodenameoriginal_reference_electrodexyzfilteringimpedancebad_channelx_warpedy_warpedz_warpedcontactsregion_namesubregion_namesubsubregion_name
nwb_file_nameelectrode_group_nameelectrode_idlfp_merge_idfilter_namefilter_sampling_ratetarget_interval_list_namelfp_band_sampling_ratelfp_electrode_group_namereference_elect_idregion_id
mediumnwb20230802_.nwb00e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.500080.00.00.0None0.0False0.00.00.0hippocampusNoneNone
1e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.501180.00.00.0None0.0False0.00.00.0hippocampusNoneNone
2e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.502280.00.00.0None0.0False0.00.00.0hippocampusNoneNone
3e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.503380.00.00.0None0.0False0.00.00.0hippocampusNoneNone
14e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.500480.00.00.0None0.0False0.00.00.0hippocampusNoneNone
....................................................................................
835e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.5033580.00.00.0None0.0False0.00.00.0hippocampusNoneNone
936e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.5003680.00.00.0None0.0False0.00.00.0hippocampusNoneNone
37e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.5013780.00.00.0None0.0False0.00.00.0hippocampusNoneNone
38e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.5023880.00.00.0None0.0False0.00.00.0hippocampusNoneNone
39e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.5033980.00.00.0None0.0False0.00.00.0hippocampusNoneNone
\n", + "

88 rows × 18 columns

\n", + "
" + ], + "text/plain": [ + " probe_id \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 tetrode_12.5 \n", + " 1 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 tetrode_12.5 \n", + " 2 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 tetrode_12.5 \n", + " 3 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 tetrode_12.5 \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 tetrode_12.5 \n", + "... ... \n", + " 8 35 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 tetrode_12.5 \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 tetrode_12.5 \n", + " 37 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 tetrode_12.5 \n", + " 38 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 tetrode_12.5 \n", + " 39 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 tetrode_12.5 \n", + "\n", + " probe_shank \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + " 1 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + " 2 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + " 3 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + "... ... 
\n", + " 8 35 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + " 37 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + " 38 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + " 39 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + "\n", + " probe_electrode \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + " 1 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 1 \n", + " 2 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 2 \n", + " 3 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 3 \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + "... ... \n", + " 8 35 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 3 \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + " 37 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 1 \n", + " 38 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 2 \n", + " 39 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 3 \n", + "\n", + " name \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0 \n", + " 1 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 1 \n", + " 2 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 2 \n", + " 3 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 3 \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 4 \n", + "... ... 
        [text/plain output continues: the same 88 electrode rows repeated under the remaining column blocks (original_reference_electrode, x, y, z, filtering, impedance, bad_channel, x_warped, y_warped, z_warped, contacts, region_name, subregion_name, subsubregion_name); every row shares the same lfp_merge_id, filter "Ripple 150-250 Hz", target interval 02_r1_ripple_demo, electrode group test_hippocampus, and region hippocampus]
        "[88 rows x 18 columns]"
       ]
      },
      "execution_count": 20,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
     "electrodes = (\n",
     "    (sgc.Electrode() & {\"nwb_file_name\": nwb_file_name})\n",
     "    * (\n",
     "        lfp_analysis.LFPBandSelection.LFPBandElectrode()\n",
     "        & {\n",
     "            \"nwb_file_name\": nwb_file_name,\n",
     "            \"filter_name\": filter_name,\n",
     "            \"target_interval_list_name\": interval_list_name,\n",
     "        }\n",
     "    )\n",
     "    * sgc.BrainRegion\n",
     ").fetch(format=\"frame\")\n",
     "electrodes"
    ]
   },
   {
    "attachments": {},
    "cell_type": "markdown",
    "id": "c400f47e-a21b-451a-8721-35191161dc6e",
    "metadata": {},
    "source": [
     "For ripple detection, we want only tetrodes, and only the first good wire on each tetrode; here we assume that is the first wire listed for each tetrode. We will do this filtering with pandas syntax (see the sketch below), but you could restrict the table with DataJoint as well. Here is the filtered table.\n"
    ]
   },
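A rough sketch of the pandas filtering described in that markdown cell, assuming `electrodes` is the multi-index DataFrame fetched in the previous cell, that tetrodes can be identified from the `probe_id` values shown in its output, and that `probe_electrode == 0` stands in for the first good wire; the notebook's own filtering cell may differ in detail:

    # Hypothetical sketch -- not necessarily the notebook's own cell.
    # Keep only tetrode probes and, as a stand-in for the "first good wire",
    # the wire with probe_electrode == 0 on each tetrode.
    ripple_electrodes = electrodes[
        electrodes["probe_id"].str.contains("tetrode")
        & (electrodes["probe_electrode"] == 0)
    ]
    ripple_electrodes

The same restriction could instead be expressed in DataJoint before fetching (for example, restricting on probe_id and probe_electrode prior to the join), which is the alternative the markdown cell alludes to; the pandas route simply works on the already-fetched frame.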
   {
    "cell_type": "code",
    "execution_count": 21,
    "id": "83b9c019-a171-4d77-bccc-7c9945af1692",
    "metadata": {},
    "outputs": [
     {
      "data": {
       "text/html": [
        "[HTML rendering of the filtered electrode table for mediumnwb20230802_.nwb: one wire per tetrode (22 rows, columns probe_id through subsubregion_name), all assigned to region hippocampus]"
       ],
        [Removed side of this hunk: the corresponding HTML and text/plain outputs, plus duplicated source and markdown cells, from the previous run on tonks20211103_.nwb (filter "Ripple 150-250 Hz", target interval "test interval", lfp_band_sampling_rate 100, electrode group CA1_test, region ca1), with execution counts 10 and 5]
       "text/plain": [
        "[text/plain rendering of the same filtered 22-row DataFrame for mediumnwb20230802_.nwb]"
0.0 \n", + " 10 40 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 11 44 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 13 52 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 14 56 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 15 60 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 16 64 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 17 68 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 18 72 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 19 76 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 20 80 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 21 84 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 22 88 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 23 92 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 3 12 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 4 16 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 5 20 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 6 24 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 7 28 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 8 32 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", "\n", - " z \\\n", - "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", - "tonks20211103_.nwb 7 28 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 0.0 \n", - " 1000 CA1_test -1 19 0.0 \n", - " 8 32 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 0.0 \n", - " 1000 CA1_test -1 19 0.0 \n", + " z \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 10 40 
e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 11 44 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 13 52 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 14 56 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 15 60 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 16 64 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 17 68 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 18 72 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 19 76 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 20 80 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 21 84 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 22 88 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 23 92 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 3 12 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 4 16 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 5 20 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 6 24 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 7 28 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 8 32 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", "\n", - " filtering \\\n", - "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", - "tonks20211103_.nwb 7 28 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 None \n", - " 1000 CA1_test -1 19 None \n", - " 8 32 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 None \n", - " 1000 CA1_test -1 19 None \n", + " filtering \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 10 40 
e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 11 44 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 13 52 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 14 56 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 15 60 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 16 64 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 17 68 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 18 72 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 19 76 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 20 80 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 21 84 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 22 88 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 23 92 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 3 12 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 4 16 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 5 20 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 6 24 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 7 28 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 8 32 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", "\n", - " impedance \\\n", - "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", - "tonks20211103_.nwb 7 28 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 0.0 \n", - " 1000 CA1_test -1 19 0.0 \n", - " 8 32 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 0.0 \n", - " 1000 CA1_test -1 19 0.0 \n", + " impedance \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 10 40 
e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 11 44 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 13 52 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 14 56 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 15 60 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 16 64 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 17 68 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 18 72 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 19 76 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 20 80 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 21 84 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 22 88 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 23 92 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 3 12 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 4 16 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 5 20 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 6 24 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 7 28 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 8 32 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", "\n", - " bad_channel \\\n", - "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", - "tonks20211103_.nwb 7 28 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 False \n", - " 1000 CA1_test -1 19 False \n", - " 8 32 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 False \n", - " 1000 CA1_test -1 19 False \n", + " bad_channel \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 10 40 
e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 11 44 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 13 52 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 14 56 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 15 60 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 16 64 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 17 68 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 18 72 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 19 76 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 20 80 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 21 84 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 22 88 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 23 92 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 3 12 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 4 16 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 5 20 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 6 24 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 7 28 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 8 32 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 False \n", "\n", - " x_warped \\\n", - "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", - "tonks20211103_.nwb 7 28 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 0.0 \n", - " 1000 CA1_test -1 19 0.0 \n", - " 8 32 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 0.0 \n", - " 1000 CA1_test -1 19 0.0 \n", + " x_warped \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + 
" 10 40 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 11 44 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 13 52 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 14 56 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 15 60 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 16 64 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 17 68 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 18 72 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 19 76 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 20 80 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 21 84 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 22 88 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 23 92 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 3 12 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 4 16 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 5 20 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 6 24 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 7 28 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 8 32 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", "\n", - " y_warped \\\n", - "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", - "tonks20211103_.nwb 7 28 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 0.0 \n", - " 1000 CA1_test -1 19 0.0 \n", - " 8 32 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 0.0 \n", - " 1000 CA1_test -1 19 0.0 \n", + " y_warped \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 10 40 
e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 11 44 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 13 52 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 14 56 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 15 60 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 16 64 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 17 68 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 18 72 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 19 76 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 20 80 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 21 84 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 22 88 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 23 92 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 3 12 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 4 16 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 5 20 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 6 24 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 7 28 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 8 32 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", "\n", - " z_warped \\\n", - "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", - "tonks20211103_.nwb 7 28 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 0.0 \n", - " 1000 CA1_test -1 19 0.0 \n", - " 8 32 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 0.0 \n", - " 1000 CA1_test -1 19 0.0 \n", + " z_warped \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 10 40 
e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 11 44 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 13 52 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 14 56 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 15 60 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 16 64 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 17 68 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 18 72 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 19 76 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 20 80 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 21 84 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 22 88 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 23 92 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 3 12 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 4 16 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 5 20 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 6 24 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 7 28 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 8 32 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 0.0 \n", "\n", - " contacts \\\n", - "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", - "tonks20211103_.nwb 7 28 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 \n", - " 1000 CA1_test -1 19 \n", - " 8 32 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 \n", - " 1000 CA1_test -1 19 \n", + " contacts \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 10 40 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 
1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 11 44 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 13 52 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 14 56 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 15 60 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 16 64 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 17 68 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 18 72 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 19 76 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 20 80 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 21 84 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 22 88 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 23 92 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 3 12 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 4 16 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 5 20 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 6 24 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 7 28 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 8 32 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 \n", "\n", - " region_name \\\n", - "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", - "tonks20211103_.nwb 7 28 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 ca1 \n", - " 1000 CA1_test -1 19 ca1 \n", - " 8 32 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 ca1 \n", - " 1000 CA1_test -1 19 ca1 \n", + " region_name \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 10 40 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 11 44 
e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 13 52 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 14 56 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 15 60 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 16 64 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 17 68 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 18 72 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 19 76 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 20 80 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 21 84 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 22 88 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 23 92 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 3 12 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 4 16 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 5 20 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 6 24 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 7 28 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 8 32 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 hippocampus \n", "\n", - " subregion_name \\\n", - "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", - "tonks20211103_.nwb 7 28 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 None \n", - " 1000 CA1_test -1 19 None \n", - " 8 32 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 None \n", - " 1000 CA1_test -1 19 None \n", + " subregion_name \\\n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None 
\n", + " 10 40 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 11 44 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 13 52 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 14 56 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 15 60 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 16 64 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 17 68 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 18 72 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 19 76 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 20 80 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 21 84 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 22 88 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 23 92 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 3 12 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 4 16 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 5 20 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 6 24 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 7 28 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 8 32 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", "\n", - " subsubregion_name \n", - "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", - "tonks20211103_.nwb 7 28 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 None \n", - " 1000 CA1_test -1 19 None \n", - " 8 32 2f3c93d5-5d5d-2d47-75b3-c346dddbd312 Ripple 150-250 Hz 1000 test interval 100 CA1_test -1 19 None \n", - " 1000 CA1_test -1 19 None " + " subsubregion_name \n", + "nwb_file_name electrode_group_name electrode_id lfp_merge_id filter_name filter_sampling_rate target_interval_list_name lfp_band_sampling_rate lfp_electrode_group_name reference_elect_id region_id \n", + "mediumnwb20230802_.nwb 0 0 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 1 4 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 
None \n", + " 10 40 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 11 44 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 13 52 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 14 56 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 15 60 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 16 64 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 17 68 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 18 72 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 19 76 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 20 80 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 21 84 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 22 88 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 23 92 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 3 12 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 4 16 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 5 20 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 6 24 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 7 28 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 8 32 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None \n", + " 9 36 e5c8a41b-5bb1-c12c-d306-e80e5491d6dd Ripple 150-250 Hz 1000 02_r1_ripple_demo 1000 test_hippocampus -1 16 None " ] }, - "execution_count": 5, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -900,7 +2606,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 22, "id": "0eba0656-c4dd-42bd-b18f-23aac13eb70a", "metadata": {}, "outputs": [], @@ -934,7 +2640,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 24, "id": "345709c2-2892-407d-ab2d-d3fd36f67d69", "metadata": {}, "outputs": [], @@ -959,7 +2665,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 25, "id": "96eb2aae-e93a-4214-a5e2-8c729767634b", "metadata": {}, "outputs": [ @@ -1059,7 +2765,7 @@ "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1069,7 +2775,7 @@ "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1079,7 +2785,7 @@ "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1089,7 +2795,7 @@ "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1099,7 +2805,7 @@ "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1109,7 +2815,7 @@ "\n", "\n", "\n", - 
"\n", + "\n", "\n", "\n", "\n", @@ -1119,7 +2825,7 @@ "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1129,7 +2835,7 @@ "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1139,7 +2845,7 @@ "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1149,7 +2855,7 @@ "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1159,7 +2865,7 @@ "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1169,7 +2875,7 @@ "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1178,29 +2884,29 @@ "\n", "
probe_idprobe_shankprobe_electrodenameoriginal_reference_electrodexyzfilteringimpedancebad_channelx_warpedy_warpedz_warpedcontactsregion_namesubregion_namesubsubregion_name
312e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.5001280.00.00.0None0.0False0.00.00.0hippocampusNoneNone
nwb_file_nameelectrode_group_nameelectrode_idlfp_merge_idfilter_namefilter_sampling_ratetarget_interval_list_namelfp_band_sampling_ratelfp_electrode_group_namereference_elect_idregion_id416e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.5001680.00.00.0None0.0False0.00.00.0hippocampusNoneNone
tonks20211103_.nwb7282f3c93d5-5d5d-2d47-75b3-c346dddbd312Ripple 150-250 Hz1000test interval100CA1_test520e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-11916tetrode_12.50028442080.00.00.00.00.0ca1hippocampusNoneNone
624e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000CA1_testtest_hippocampus-11916tetrode_12.5002480.00.00.0None0.0False0.00.00.0hippocampusNoneNone
728e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-116tetrode_12.500284480.00.00.00.00.0ca1hippocampusNoneNone
8322f3c93d5-5d5d-2d47-75b3-c346dddbd312Ripple 150-250 Hz1000test interval100CA1_test832e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000test_hippocampus-11916tetrode_12.500324480.00.00.00.00.0ca1hippocampusNoneNone
936e5c8a41b-5bb1-c12c-d306-e80e5491d6ddRipple 150-250 Hz100002_r1_ripple_demo1000CA1_testtest_hippocampus-11916tetrode_12.50032443680.00.00.00.00.0ca1hippocampusNoneNone
Ripple 150-250 Hz1000Winnie20220714_.nwbpos 9 valid timesWinnie20220714_.nwb_pos 9 valid times_LFP_default_difference1000CA1tetrode_sample_WinnieRipple 150-250 Hz1000Winnie20220714_.nwbpos 9 valid timesWinnie20220714_.nwb_pos 9 valid times_LFP_default_difference1000CA1tetrode_sample_WinnieRipple 150-250 Hz1000Winnie20220714_.nwbpos 9 valid timesWinnie20220714_.nwb_pos 9 valid times_LFP_default_difference1000CA1tetrode_sample_WinnieRipple 150-250 Hz1000Winnie20220714_.nwbpos 9 valid timesWinnie20220714_.nwb_pos 9 valid times_LFP_default_difference1000CA1tetrode_sample_WinnieRipple 150-250 Hz1000Winnie20220714_.nwbpos 9 valid timesWinnie20220714_.nwb_pos 9 valid times_LFP_default_difference1000CA1tetrode_sample_WinnieRipple 150-250 Hz1000Winnie20220714_.nwbpos 9 valid timesWinnie20220714_.nwb_pos 9 valid times_LFP_default_difference1000CA1tetrode_sample_WinnieRipple 150-250 Hz1000Winnie20220714_.nwbpos 9 valid timesWinnie20220714_.nwb_pos 9 valid times_LFP_default_difference1000CA1tetrode_sample_WinnieRipple 150-250 Hz1000Winnie20220714_.nwbpos 9 valid timesWinnie20220714_.nwb_pos 9 valid times_LFP_default_difference1000CA1tetrode_sample_WinnieRipple 150-250 Hz1000Winnie20220714_.nwbpos 9 valid timesWinnie20220714_.nwb_pos 9 valid times_LFP_default_difference1000CA1tetrode_sample_WinnieRipple 150-250 Hz1000Winnie20220714_.nwbpos 9 valid timesWinnie20220714_.nwb_pos 9 valid times_LFP_default_difference1000CA1tetrode_sample_WinnieRipple 150-250 Hz1000Winnie20220714_.nwbpos 9 valid timesWinnie20220714_.nwb_pos 9 valid times_LFP_default_difference1000CA1tetrode_sample_WinnieRipple 150-250 Hz1000Winnie20220714_.nwbpos 9 valid timesWinnie20220714_.nwb_pos 9 valid times_LFP_default_difference1000CA1tetrode_sample_Winnie-1
\n", "

...

\n", - "

Total: 60265

\n", + "

Total: 60159

\n", " " ], "text/plain": [ "*lfp_merge_id *filter_name *filter_sampli *nwb_file_name *target_interv *lfp_band_samp *group_name *lfp_electrode *electrode_gro *electrode_id *reference_ele\n", "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", - "0087e094-8238- Ripple 150-250 1000 Winnie20220714 pos 9 valid ti 1000 CA1 tetrode_sample 0 0 -1 \n", - "0087e094-8238- Ripple 150-250 1000 Winnie20220714 pos 9 valid ti 1000 CA1 tetrode_sample 1 4 -1 \n", - "0087e094-8238- Ripple 150-250 1000 Winnie20220714 pos 9 valid ti 1000 CA1 tetrode_sample 11 44 -1 \n", - "0087e094-8238- Ripple 150-250 1000 Winnie20220714 pos 9 valid ti 1000 CA1 tetrode_sample 12 49 -1 \n", - "0087e094-8238- Ripple 150-250 1000 Winnie20220714 pos 9 valid ti 1000 CA1 tetrode_sample 13 52 -1 \n", - "0087e094-8238- Ripple 150-250 1000 Winnie20220714 pos 9 valid ti 1000 CA1 tetrode_sample 14 56 -1 \n", - "0087e094-8238- Ripple 150-250 1000 Winnie20220714 pos 9 valid ti 1000 CA1 tetrode_sample 16 64 -1 \n", - "0087e094-8238- Ripple 150-250 1000 Winnie20220714 pos 9 valid ti 1000 CA1 tetrode_sample 17 68 -1 \n", - "0087e094-8238- Ripple 150-250 1000 Winnie20220714 pos 9 valid ti 1000 CA1 tetrode_sample 18 72 -1 \n", - "0087e094-8238- Ripple 150-250 1000 Winnie20220714 pos 9 valid ti 1000 CA1 tetrode_sample 19 76 -1 \n", - "0087e094-8238- Ripple 150-250 1000 Winnie20220714 pos 9 valid ti 1000 CA1 tetrode_sample 2 8 -1 \n", - "0087e094-8238- Ripple 150-250 1000 Winnie20220714 pos 9 valid ti 1000 CA1 tetrode_sample 20 80 -1 \n", + "0087e094-8238- Ripple 150-250 1000 Winnie20220714 Winnie20220714 1000 CA1 tetrode_sample 0 0 -1 \n", + "0087e094-8238- Ripple 150-250 1000 Winnie20220714 Winnie20220714 1000 CA1 tetrode_sample 1 4 -1 \n", + "0087e094-8238- Ripple 150-250 1000 Winnie20220714 Winnie20220714 1000 CA1 tetrode_sample 11 44 -1 \n", + "0087e094-8238- Ripple 150-250 1000 Winnie20220714 Winnie20220714 1000 CA1 tetrode_sample 12 49 -1 \n", + "0087e094-8238- Ripple 150-250 1000 Winnie20220714 Winnie20220714 1000 CA1 tetrode_sample 13 52 -1 \n", + "0087e094-8238- Ripple 150-250 1000 Winnie20220714 Winnie20220714 1000 CA1 tetrode_sample 14 56 -1 \n", + "0087e094-8238- Ripple 150-250 1000 Winnie20220714 Winnie20220714 1000 CA1 tetrode_sample 16 64 -1 \n", + "0087e094-8238- Ripple 150-250 1000 Winnie20220714 Winnie20220714 1000 CA1 tetrode_sample 17 68 -1 \n", + "0087e094-8238- Ripple 150-250 1000 Winnie20220714 Winnie20220714 1000 CA1 tetrode_sample 18 72 -1 \n", + "0087e094-8238- Ripple 150-250 1000 Winnie20220714 Winnie20220714 1000 CA1 tetrode_sample 19 76 -1 \n", + "0087e094-8238- Ripple 150-250 1000 Winnie20220714 Winnie20220714 1000 CA1 tetrode_sample 2 8 -1 \n", + "0087e094-8238- Ripple 150-250 1000 Winnie20220714 Winnie20220714 1000 CA1 tetrode_sample 20 80 -1 \n", " ...\n", - " (Total: 60265)" + " (Total: 60159)" ] }, - "execution_count": 12, + "execution_count": 25, "metadata": {}, "output_type": "execute_result" } @@ -1220,7 +2926,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 26, "id": "a601b1ac-0d37-4215-b9c6-5797bf21a1a0", "metadata": {}, "outputs": [], @@ -1239,7 +2945,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 27, "id": "3ac90755-f6b0-434c-ab57-6177e064a5f3", "metadata": {}, "outputs": [ @@ -1326,13 +3032,13 @@ " (Total: 3)" ] }, - "execution_count": 14, + "execution_count": 27, "metadata": {}, "output_type": "execute_result" } ], "source": [ 
- "sgr.RippleParameters()" + "sgr.RippleParameters().insert_default" ] }, { @@ -1346,15 +3052,15 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 49, "id": "837a1c8d-87b6-42d9-9cfa-787ef63f7284", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'ripple_param_name': 'default',\n", - " 'ripple_param_dict': {'speed_name': 'head_speed',\n", + "{'ripple_param_name': 'default_trodes',\n", + " 'ripple_param_dict': {'speed_name': 'speed',\n", " 'ripple_detection_algorithm': 'Kay_ripple_detector',\n", " 'ripple_detection_params': {'speed_threshold': 4.0,\n", " 'minimum_duration': 0.015,\n", @@ -1363,13 +3069,13 @@ " 'close_ripple_threshold': 0.0}}}" ] }, - "execution_count": 15, + "execution_count": 49, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "(sgrip.RippleParameters() & {\"ripple_param_name\": \"default\"}).fetch1()" + "(sgrip.RippleParameters() & {\"ripple_param_name\": \"default_trodes\"}).fetch1()" ] }, { @@ -1400,27 +3106,136 @@ "## Check interval speed\n", "\n", "The speed for this interval should exist under the default position parameter\n", - "set and for a given interval.\n" + "set and for a given interval. We can quickly populate this here\n" ] }, { "cell_type": "code", - "execution_count": 16, - "id": "cc3c95f0-3fd0-440c-93f9-338f01a1d893", + "execution_count": 41, + "id": "3cc0fb45", "metadata": {}, "outputs": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/hdmf/spec/namespace.py:531: UserWarning: Ignoring cached namespace 'hdmf-common' version 1.5.1 because version 1.6.0 is already loaded.\n", - " warn(\"Ignoring cached namespace '%s' version %s because version %s is already loaded.\"\n", - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/hdmf/spec/namespace.py:531: UserWarning: Ignoring cached namespace 'core' version 2.4.0 because version 2.6.0-alpha is already loaded.\n", - " warn(\"Ignoring cached namespace '%s' version %s because version %s is already loaded.\"\n", - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/hdmf/spec/namespace.py:531: UserWarning: Ignoring cached namespace 'hdmf-experimental' version 0.2.0 because version 0.3.0 is already loaded.\n", - " warn(\"Ignoring cached namespace '%s' version %s because version %s is already loaded.\"\n" - ] - }, + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

nwb_file_name

\n", + " name of the NWB file\n", + "
\n", + "

interval_list_name

\n", + " descriptive name of this interval list\n", + "
\n", + "

trodes_pos_params_name

\n", + " name for this set of parameters\n", + "
\n", + "

analysis_file_name

\n", + " name of the file\n", + "
\n", + "

position_object_id

\n", + " \n", + "
\n", + "

orientation_object_id

\n", + " \n", + "
\n", + "

velocity_object_id

\n", + " \n", + "
mediumnwb20230802_.nwbpos 0 valid timessingle_ledmediumnwb20230802_9GTXMUKTK1.nwb6d725947-3ba0-4cbe-9483-e77b897ba1ab848f5b77-cf49-41c2-906d-06b58a73108641cf08d3-f114-4201-b7d8-d6e541015b42
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*nwb_file_name *interval_list *trodes_pos_pa analysis_file_ position_objec orientation_ob velocity_objec\n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", + "mediumnwb20230 pos 0 valid ti single_led mediumnwb20230 6d725947-3ba0- 848f5b77-cf49- 41cf08d3-f114-\n", + " (Total: 1)" + ] + }, + "execution_count": 41, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pos_key = {\n", + " \"nwb_file_name\": nwb_file_name,\n", + " \"trodes_pos_params_name\": \"single_led\",\n", + " \"interval_list_name\": \"pos 0 valid times\",\n", + "}\n", + "sgp.TrodesPosSelection().insert1(pos_key, skip_duplicates=True)\n", + "sgp.TrodesPosV1.populate(pos_key, display_progress=True)\n", + "sgp.TrodesPosV1 & pos_key" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "id": "cc3c95f0-3fd0-440c-93f9-338f01a1d893", + "metadata": {}, + "outputs": [ { "data": { "text/html": [ @@ -1442,12 +3257,13 @@ " \n", " \n", " \n", - " head_position_x\n", - " head_position_y\n", - " head_orientation\n", - " head_velocity_x\n", - " head_velocity_y\n", - " head_speed\n", + " video_frame_ind\n", + " position_x\n", + " position_y\n", + " orientation\n", + " velocity_x\n", + " velocity_y\n", + " speed\n", " \n", " \n", " time\n", @@ -1457,53 +3273,59 @@ " \n", " \n", " \n", + " \n", " \n", " \n", " \n", " \n", - " 1.635961e+09\n", - " 98.670000\n", - " 78.320000\n", - " 1.878849\n", - " -0.212384\n", - " -1.050933e+00\n", - " 1.072179\n", - " \n", - " \n", - " 1.635961e+09\n", - " 98.615000\n", - " 78.210000\n", - " 1.899349\n", - " -0.143244\n", - " -1.136351e+00\n", - " 1.145344\n", - " \n", - " \n", - " 1.635961e+09\n", - " 98.633333\n", - " 78.173333\n", - " 1.919567\n", - " -0.031501\n", - " -1.123425e+00\n", - " 1.123867\n", - " \n", - " \n", - " 1.635961e+09\n", - " 98.596667\n", - " 78.100000\n", - " 1.932884\n", - " 0.094982\n", - " -1.013202e+00\n", - " 1.017644\n", - " \n", - " \n", - " 1.635961e+09\n", - " 98.633333\n", - " 78.100000\n", - " 1.946067\n", - " 0.194273\n", - " -8.272934e-01\n", - " 0.849798\n", + " 1.625936e+09\n", + " 0\n", + " 180.251195\n", + " 162.885335\n", + " 1.584011\n", + " -1.062553\n", + " 1.052022\n", + " 1.495249\n", + " \n", + " \n", + " 1.625936e+09\n", + " 1\n", + " 180.090400\n", + " 163.287322\n", + " 1.579705\n", + " -1.162356\n", + " 0.674062\n", + " 1.343663\n", + " \n", + " \n", + " 1.625936e+09\n", + " 2\n", + " 180.143998\n", + " 163.421318\n", + " 1.581228\n", + " -1.218606\n", + " 0.243100\n", + " 1.242618\n", + " \n", + " \n", + " 1.625936e+09\n", + " 3\n", + " 179.983203\n", + " 162.938933\n", + " 1.576679\n", + " -1.243190\n", + " -0.085662\n", + " 1.246138\n", + " \n", + " \n", + " 1.625936e+09\n", + " 4\n", + " 180.197597\n", + " 162.670942\n", + " 1.582475\n", + " -1.230759\n", + " -0.224637\n", + " 1.251091\n", " \n", " \n", " ...\n", @@ -1513,103 +3335,105 @@ " ...\n", " ...\n", " ...\n", + " ...\n", " \n", " \n", - " 1.635963e+09\n", - " 96.323333\n", - " 71.500000\n", - " -2.265535\n", - " -0.415082\n", - " -1.486577e-05\n", - " 0.415082\n", - " \n", - " \n", - " 1.635963e+09\n", - " 96.286667\n", - " 71.500000\n", - " -2.158799\n", - " -0.413708\n", - " -3.243187e-06\n", - " 0.413708\n", - " \n", - " \n", - " 1.635963e+09\n", - " 96.250000\n", - " 71.500000\n", - " -2.034444\n", - " -0.374655\n", - " -6.383825e-07\n", - " 0.374655\n", - " \n", - " \n", - " 1.635963e+09\n", - " 96.250000\n", - " 71.500000\n", - " -2.034444\n", - " 
-0.307793\n", - " -1.133319e-07\n", - " 0.307793\n", - " \n", - " \n", - " 1.635963e+09\n", - " 96.250000\n", - " 71.500000\n", - " -2.034444\n", - " -0.229237\n", - " -1.813955e-08\n", - " 0.229237\n", + " 1.625937e+09\n", + " 44186\n", + " 38.483603\n", + " 113.574868\n", + " -1.402199\n", + " 1.008364\n", + " -0.865117\n", + " 1.328618\n", + " \n", + " \n", + " 1.625937e+09\n", + " 44187\n", + " 38.483603\n", + " 113.521270\n", + " -1.398606\n", + " 0.603148\n", + " -0.534938\n", + " 0.806192\n", + " \n", + " \n", + " 1.625937e+09\n", + " 44188\n", + " 38.430005\n", + " 113.574868\n", + " -1.416478\n", + " 0.256839\n", + " -0.219871\n", + " 0.338096\n", + " \n", + " \n", + " 1.625937e+09\n", + " 44189\n", + " 38.376407\n", + " 113.574868\n", + " -1.432157\n", + " 0.017772\n", + " 0.025349\n", + " 0.030958\n", + " \n", + " \n", + " 1.625937e+09\n", + " 44190\n", + " 38.376407\n", + " 113.628467\n", + " -1.435269\n", + " -0.107231\n", + " 0.174792\n", + " 0.205063\n", " \n", " \n", "\n", - "

48950 rows × 6 columns

\n", + "

44191 rows × 7 columns

\n", "" ], "text/plain": [ - " head_position_x head_position_y head_orientation \\\n", - "time \n", - "1.635961e+09 98.670000 78.320000 1.878849 \n", - "1.635961e+09 98.615000 78.210000 1.899349 \n", - "1.635961e+09 98.633333 78.173333 1.919567 \n", - "1.635961e+09 98.596667 78.100000 1.932884 \n", - "1.635961e+09 98.633333 78.100000 1.946067 \n", - "... ... ... ... \n", - "1.635963e+09 96.323333 71.500000 -2.265535 \n", - "1.635963e+09 96.286667 71.500000 -2.158799 \n", - "1.635963e+09 96.250000 71.500000 -2.034444 \n", - "1.635963e+09 96.250000 71.500000 -2.034444 \n", - "1.635963e+09 96.250000 71.500000 -2.034444 \n", + " video_frame_ind position_x position_y orientation \\\n", + "time \n", + "1.625936e+09 0 180.251195 162.885335 1.584011 \n", + "1.625936e+09 1 180.090400 163.287322 1.579705 \n", + "1.625936e+09 2 180.143998 163.421318 1.581228 \n", + "1.625936e+09 3 179.983203 162.938933 1.576679 \n", + "1.625936e+09 4 180.197597 162.670942 1.582475 \n", + "... ... ... ... ... \n", + "1.625937e+09 44186 38.483603 113.574868 -1.402199 \n", + "1.625937e+09 44187 38.483603 113.521270 -1.398606 \n", + "1.625937e+09 44188 38.430005 113.574868 -1.416478 \n", + "1.625937e+09 44189 38.376407 113.574868 -1.432157 \n", + "1.625937e+09 44190 38.376407 113.628467 -1.435269 \n", "\n", - " head_velocity_x head_velocity_y head_speed \n", - "time \n", - "1.635961e+09 -0.212384 -1.050933e+00 1.072179 \n", - "1.635961e+09 -0.143244 -1.136351e+00 1.145344 \n", - "1.635961e+09 -0.031501 -1.123425e+00 1.123867 \n", - "1.635961e+09 0.094982 -1.013202e+00 1.017644 \n", - "1.635961e+09 0.194273 -8.272934e-01 0.849798 \n", - "... ... ... ... \n", - "1.635963e+09 -0.415082 -1.486577e-05 0.415082 \n", - "1.635963e+09 -0.413708 -3.243187e-06 0.413708 \n", - "1.635963e+09 -0.374655 -6.383825e-07 0.374655 \n", - "1.635963e+09 -0.307793 -1.133319e-07 0.307793 \n", - "1.635963e+09 -0.229237 -1.813955e-08 0.229237 \n", + " velocity_x velocity_y speed \n", + "time \n", + "1.625936e+09 -1.062553 1.052022 1.495249 \n", + "1.625936e+09 -1.162356 0.674062 1.343663 \n", + "1.625936e+09 -1.218606 0.243100 1.242618 \n", + "1.625936e+09 -1.243190 -0.085662 1.246138 \n", + "1.625936e+09 -1.230759 -0.224637 1.251091 \n", + "... ... ... ... \n", + "1.625937e+09 1.008364 -0.865117 1.328618 \n", + "1.625937e+09 0.603148 -0.534938 0.806192 \n", + "1.625937e+09 0.256839 -0.219871 0.338096 \n", + "1.625937e+09 0.017772 0.025349 0.030958 \n", + "1.625937e+09 -0.107231 0.174792 0.205063 \n", "\n", - "[48950 rows x 6 columns]" + "[44191 rows x 7 columns]" ] }, - "execution_count": 16, + "execution_count": 46, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "pos_key = sgp.PositionOutput.merge_get_part(\n", - " {\n", - " \"nwb_file_name\": nwb_file_name,\n", - " \"position_info_param_name\": \"default\",\n", - " \"interval_list_name\": \"pos 1 valid times\",\n", - " }\n", - ").fetch1(\"KEY\")\n", - "(sgp.PositionOutput & pos_key).fetch1_dataframe()" + "from spyglass.position import PositionOutput\n", + "\n", + "pos_key = PositionOutput.merge_get_part(pos_key).fetch1(\"KEY\")\n", + "(PositionOutput & pos_key).fetch1_dataframe()" ] }, { @@ -1618,7 +3442,7 @@ "id": "1bc905e6-211a-4292-9f13-dfff906479a0", "metadata": {}, "source": [ - "We'll use the `head_speed` above as part of `RippleParameters`.\n" + "We'll use the `speed` above as part of `RippleParameters`. 
Ensure your selected ripple parameters value for `speed_name` matches for your data.\n" ] }, { @@ -1640,42 +3464,13 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 52, "id": "8ddee771-470f-44e4-b0ac-1d2eef60f66d", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[10:01:12][INFO] Spyglass: Computing ripple times for: {'lfp_merge_id': UUID('2f3c93d5-5d5d-2d47-75b3-c346dddbd312'), 'filter_name': 'Ripple 150-250 Hz', 'filter_sampling_rate': 1000, 'nwb_file_name': 'tonks20211103_.nwb', 'target_interval_list_name': 'test interval', 'lfp_band_sampling_rate': 1000, 'group_name': 'CA1_test', 'ripple_param_name': 'default', 'pos_merge_id': UUID('68959dc8-f8a3-c3c0-a534-096b3bc10f6c')}\n", - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/hdmf/spec/namespace.py:531: UserWarning: Ignoring cached namespace 'hdmf-common' version 1.5.1 because version 1.6.0 is already loaded.\n", - " warn(\"Ignoring cached namespace '%s' version %s because version %s is already loaded.\"\n", - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/hdmf/spec/namespace.py:531: UserWarning: Ignoring cached namespace 'core' version 2.4.0 because version 2.6.0-alpha is already loaded.\n", - " warn(\"Ignoring cached namespace '%s' version %s because version %s is already loaded.\"\n", - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/hdmf/spec/namespace.py:531: UserWarning: Ignoring cached namespace 'hdmf-experimental' version 0.2.0 because version 0.3.0 is already loaded.\n", - " warn(\"Ignoring cached namespace '%s' version %s because version %s is already loaded.\"\n", - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/pynwb/behavior.py:46: UserWarning: SpatialSeries 'series_0' has data shape (66792, 4) which is not compliant with NWB 2.5 and greater. The second dimension should have length <= 3 to represent at most x, y, z.\n", - " warnings.warn(\"SpatialSeries '%s' has data shape %s which is not compliant with NWB 2.5 and greater. \"\n", - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/pynwb/behavior.py:46: UserWarning: SpatialSeries 'series_1' has data shape (48950, 4) which is not compliant with NWB 2.5 and greater. The second dimension should have length <= 3 to represent at most x, y, z.\n", - " warnings.warn(\"SpatialSeries '%s' has data shape %s which is not compliant with NWB 2.5 and greater. \"\n", - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/pynwb/behavior.py:46: UserWarning: SpatialSeries 'series_2' has data shape (98507, 4) which is not compliant with NWB 2.5 and greater. The second dimension should have length <= 3 to represent at most x, y, z.\n", - " warnings.warn(\"SpatialSeries '%s' has data shape %s which is not compliant with NWB 2.5 and greater. \"\n", - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/pynwb/behavior.py:46: UserWarning: SpatialSeries 'series_3' has data shape (44892, 4) which is not compliant with NWB 2.5 and greater. The second dimension should have length <= 3 to represent at most x, y, z.\n", - " warnings.warn(\"SpatialSeries '%s' has data shape %s which is not compliant with NWB 2.5 and greater. \"\n", - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/pynwb/behavior.py:46: UserWarning: SpatialSeries 'series_4' has data shape (82313, 4) which is not compliant with NWB 2.5 and greater. 
The second dimension should have length <= 3 to represent at most x, y, z.\n", - " warnings.warn(\"SpatialSeries '%s' has data shape %s which is not compliant with NWB 2.5 and greater. \"\n", - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/pynwb/behavior.py:46: UserWarning: SpatialSeries 'series_5' has data shape (81566, 4) which is not compliant with NWB 2.5 and greater. The second dimension should have length <= 3 to represent at most x, y, z.\n", - " warnings.warn(\"SpatialSeries '%s' has data shape %s which is not compliant with NWB 2.5 and greater. \"\n", - "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/pynwb/behavior.py:46: UserWarning: SpatialSeries 'series_6' has data shape (83811, 4) which is not compliant with NWB 2.5 and greater. The second dimension should have length <= 3 to represent at most x, y, z.\n", - " warnings.warn(\"SpatialSeries '%s' has data shape %s which is not compliant with NWB 2.5 and greater. \"\n", - "[10:01:14][INFO] Spyglass: Writing new NWB file tonks20211103_41QOWS4VUO.nwb\n" - ] - } - ], + "outputs": [], "source": [ "key = {\n", - " \"ripple_param_name\": \"default\",\n", + " \"ripple_param_name\": \"default_trodes\",\n", " **rip_sel_key,\n", " \"pos_merge_id\": pos_key[\"merge_id\"],\n", "}\n", @@ -1693,7 +3488,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 53, "id": "676b05bd-e8ec-41de-9df0-a823cd90aca3", "metadata": {}, "outputs": [ @@ -1730,248 +3525,198 @@ " \n", " \n", " 0\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 1\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 2\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 3\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 4\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 5\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 6\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 7\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 8\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 9\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 10\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 11\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 12\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 13\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 14\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 15\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 16\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 17\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 18\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 19\n", - " 
1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 20\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 21\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 22\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 23\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 24\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 25\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 26\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 27\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 28\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 29\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 30\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 31\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 32\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 33\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 34\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 35\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 36\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 37\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", " 38\n", - " 1.635961e+09\n", - " 1.635961e+09\n", - " \n", - " \n", - " 39\n", - " 1.635961e+09\n", - " 1.635961e+09\n", - " \n", - " \n", - " 40\n", - " 1.635961e+09\n", - " 1.635961e+09\n", - " \n", - " \n", - " 41\n", - " 1.635961e+09\n", - " 1.635961e+09\n", - " \n", - " \n", - " 42\n", - " 1.635961e+09\n", - " 1.635961e+09\n", - " \n", - " \n", - " 43\n", - " 1.635961e+09\n", - " 1.635961e+09\n", - " \n", - " \n", - " 44\n", - " 1.635961e+09\n", - " 1.635961e+09\n", - " \n", - " \n", - " 45\n", - " 1.635961e+09\n", - " 1.635961e+09\n", - " \n", - " \n", - " 46\n", - " 1.635961e+09\n", - " 1.635961e+09\n", - " \n", - " \n", - " 47\n", - " 1.635961e+09\n", - " 1.635961e+09\n", - " \n", - " \n", - " 48\n", - " 1.635961e+09\n", - " 1.635961e+09\n", + " 1.625936e+09\n", + " 1.625936e+09\n", " \n", " \n", "\n", @@ -1980,58 +3725,48 @@ "text/plain": [ " start_time end_time\n", "id \n", - "0 1.635961e+09 1.635961e+09\n", - "1 1.635961e+09 1.635961e+09\n", - "2 1.635961e+09 1.635961e+09\n", - "3 1.635961e+09 1.635961e+09\n", - "4 1.635961e+09 1.635961e+09\n", - "5 1.635961e+09 1.635961e+09\n", - "6 1.635961e+09 1.635961e+09\n", - "7 1.635961e+09 1.635961e+09\n", - "8 1.635961e+09 1.635961e+09\n", - "9 1.635961e+09 1.635961e+09\n", - "10 1.635961e+09 1.635961e+09\n", - "11 1.635961e+09 1.635961e+09\n", - "12 1.635961e+09 1.635961e+09\n", - "13 1.635961e+09 1.635961e+09\n", - "14 1.635961e+09 1.635961e+09\n", - "15 1.635961e+09 1.635961e+09\n", - "16 1.635961e+09 1.635961e+09\n", - "17 1.635961e+09 
1.635961e+09\n", - "18 1.635961e+09 1.635961e+09\n", - "19 1.635961e+09 1.635961e+09\n", - "20 1.635961e+09 1.635961e+09\n", - "21 1.635961e+09 1.635961e+09\n", - "22 1.635961e+09 1.635961e+09\n", - "23 1.635961e+09 1.635961e+09\n", - "24 1.635961e+09 1.635961e+09\n", - "25 1.635961e+09 1.635961e+09\n", - "26 1.635961e+09 1.635961e+09\n", - "27 1.635961e+09 1.635961e+09\n", - "28 1.635961e+09 1.635961e+09\n", - "29 1.635961e+09 1.635961e+09\n", - "30 1.635961e+09 1.635961e+09\n", - "31 1.635961e+09 1.635961e+09\n", - "32 1.635961e+09 1.635961e+09\n", - "33 1.635961e+09 1.635961e+09\n", - "34 1.635961e+09 1.635961e+09\n", - "35 1.635961e+09 1.635961e+09\n", - "36 1.635961e+09 1.635961e+09\n", - "37 1.635961e+09 1.635961e+09\n", - "38 1.635961e+09 1.635961e+09\n", - "39 1.635961e+09 1.635961e+09\n", - "40 1.635961e+09 1.635961e+09\n", - "41 1.635961e+09 1.635961e+09\n", - "42 1.635961e+09 1.635961e+09\n", - "43 1.635961e+09 1.635961e+09\n", - "44 1.635961e+09 1.635961e+09\n", - "45 1.635961e+09 1.635961e+09\n", - "46 1.635961e+09 1.635961e+09\n", - "47 1.635961e+09 1.635961e+09\n", - "48 1.635961e+09 1.635961e+09" + "0 1.625936e+09 1.625936e+09\n", + "1 1.625936e+09 1.625936e+09\n", + "2 1.625936e+09 1.625936e+09\n", + "3 1.625936e+09 1.625936e+09\n", + "4 1.625936e+09 1.625936e+09\n", + "5 1.625936e+09 1.625936e+09\n", + "6 1.625936e+09 1.625936e+09\n", + "7 1.625936e+09 1.625936e+09\n", + "8 1.625936e+09 1.625936e+09\n", + "9 1.625936e+09 1.625936e+09\n", + "10 1.625936e+09 1.625936e+09\n", + "11 1.625936e+09 1.625936e+09\n", + "12 1.625936e+09 1.625936e+09\n", + "13 1.625936e+09 1.625936e+09\n", + "14 1.625936e+09 1.625936e+09\n", + "15 1.625936e+09 1.625936e+09\n", + "16 1.625936e+09 1.625936e+09\n", + "17 1.625936e+09 1.625936e+09\n", + "18 1.625936e+09 1.625936e+09\n", + "19 1.625936e+09 1.625936e+09\n", + "20 1.625936e+09 1.625936e+09\n", + "21 1.625936e+09 1.625936e+09\n", + "22 1.625936e+09 1.625936e+09\n", + "23 1.625936e+09 1.625936e+09\n", + "24 1.625936e+09 1.625936e+09\n", + "25 1.625936e+09 1.625936e+09\n", + "26 1.625936e+09 1.625936e+09\n", + "27 1.625936e+09 1.625936e+09\n", + "28 1.625936e+09 1.625936e+09\n", + "29 1.625936e+09 1.625936e+09\n", + "30 1.625936e+09 1.625936e+09\n", + "31 1.625936e+09 1.625936e+09\n", + "32 1.625936e+09 1.625936e+09\n", + "33 1.625936e+09 1.625936e+09\n", + "34 1.625936e+09 1.625936e+09\n", + "35 1.625936e+09 1.625936e+09\n", + "36 1.625936e+09 1.625936e+09\n", + "37 1.625936e+09 1.625936e+09\n", + "38 1.625936e+09 1.625936e+09" ] }, - "execution_count": 18, + "execution_count": 53, "metadata": {}, "output_type": "execute_result" } @@ -2041,6 +3776,65 @@ "ripple_times" ] }, + { + "cell_type": "markdown", + "id": "a219a82c", + "metadata": {}, + "source": [ + "We can also inspect the lfp trace at these ripple times.\n", + "\n", + "* *Note: The ripple detection algorithm depends on estimates of the standard deviation of power in the ripple band. 
Running analysis on longer intervals will lead to better estimates of this value, and thereby better segmentation of ripple events*" + ] + }, + { + "cell_type": "code", + "execution_count": 103, + "id": "ee9324f0", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Text(0, 0.5, 'Voltage (uV)')" + ] + }, + "execution_count": 103, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkcAAAG0CAYAAAA4rYPdAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOz9eZxkR3klDJ+4a2bW1lW9tySEZAk8GIwxMGDm9bCDeQ3YZrENY78wgw3z8dmvMWb4zNgehGcMg40B23iZ8QYYGIGNBZhNAgxaLAOS0I6WltStXqura8nt7rF8f9yIuHFvZnVXS1Vd6uo4v19LlZk3MyNv3ow4cZ7zPA8RQghYWFhYWFhYWFgAAJzNHoCFhYWFhYWFxWMJlhxZWFhYWFhYWBiw5MjCwsLCwsLCwoAlRxYWFhYWFhYWBiw5srCwsLCwsLAwYMmRhYWFhYWFhYUBS44sLCwsLCwsLAxYcmRhYWFhYWFhYcCSIwsLCwsLCwsLA5YcWVhYWFhYWFgYOKfI0XXXXYdXvOIV2LdvHwgh+NznPld7/I1vfCMIIbV/z372s2vHZFmGX/3VX8WOHTswMTGBV77ylThy5MhZ/BQWFhYWFhYWj2V4mz2AM0EURXjqU5+K//gf/yNe/epXjz3mJ37iJ/C3f/u3+nYQBLXH3/a2t+Gf/umfcOWVV2L79u34jd/4Dbz85S/HLbfcAtd11zQOzjmOHTuGqakpEEIe+QeysLCwsLCwOGsQQmAwGGDfvn1wnFPoQ+IcBQBx1VVX1e57wxveIH7qp35q1ed0u13h+7648sor9X1Hjx4VjuOIr371q2t+78OHDwsA9p/9Z//Zf/af/Wf/nYP/Dh8+fMp1/pxSjtaCb33rW9i1axe2bduG5z73ufi93/s97Nq1CwBwyy23oCgKvOQlL9HH79u3D09+8pNx44034qUvfenY18yyDFmW6dslNwMOHz6M6enpDfw0FhbnIKII2Lev/PvYMWBiYnPHcz7AnnMLizWh3+/joosuwtTU1CmP21Lk6GUvexle+9rX4uKLL8aBAwfwO7/zO3jBC16AW265BWEYYn5+HkEQYHZ2tva83bt3Y35+ftXXfd/73of3vOc9I/dPT09bcmRh0YQZnp6etgv12YA95xYWZ4TTWWK2FDn6uZ/7Of33k5/8ZDzjGc/AxRdfjC996Ut41ateterzhBCnPFHvete78Pa3v13fVszTwsLCwsLCYuvhnMpWO1Ps3bsXF198Mfbv3w8A2LNnD/I8x8rKSu24hYUF7N69e9XXCcNQq0RWLbKwsLCwsNja2NLkaGlpCYcPH8bevXsBAE9/+tPh+z6+9rWv6WOOHz+Ou+66C895znM2a5gWFhYWFhYWjyGcU2G14XCIBx54QN8+cOAAbrvtNszNzWFubg5XXHEFXv3qV2Pv3r04ePAg/ut//a/YsWMHfuZnfgYAMDMzgze96U34jd/4DWzfvh1zc3N4xzvegac85Sl40YtetFkfy8LCwsLCwuIxhHOKHN188814/vOfr28rH9Ab3vAG/Pmf/znuvPNOfPzjH0e328XevXvx/Oc/H5/+9KdrrvQPfehD8DwPP/uzP4skSfDCF74QH/3oR9dc48jCwsLCwsJia4MIlZdusWb0+33MzMyg1+tZ/5GFRRNRBExOln8PhzZz6mzAnnMLizVhrev3lvYcWVhYWFhYWFicKSw5srCwsLCwsLAwYMmRhYWFhYWFhYUBS44sLCwsLCwsLAxYcmRhYWFhYWFhYcCSIwsLCwsLCwsLA5YcWVhYWKwCwTl4lm32MCwsLM4yLDmysLCwWAWHf+mX8MCLXgQex5s9FAsLi7MIS44sLCwsVkFy2+1gJxdRzM9v9lAsLCzOIiw5srCwsFgFgtLyD8Y2dyAWFhZnFZYcWVhYWKwCURTl/znf5JFYWFicTVhyZGFhYTEGgjFAtZ4cLm3uYCwsLM4qLDmysLCwGAMdUgMgopVNHImFhcXZhrfZA7CwsLB4LELkOfb/wKswnNiHx8nwmoWFxfkBS44sLCwsxkDkKY7t/XdgXgv9XhcTmz0gCwuLswYbVrOwsLAYA5EMwNyg/JvabDULi/MJVjmysLCwGAMWxRDgAM/ADf+RhYXF1oclRxYWFhZjQOME+eAzEGwZefrKzR6OhYXFWYQlRxYWFhZjUERDCLYIgCKKBps9HAsLi7MI6zmysLCwGIMiSgGUXiPObBFIC4vzCZYcWVhYWIxBFiUAyiKQnFnPkYXF+QRLjiwsLCzGII9i/XeUJ5s4EgsLi7MNS44sLCwsxiCPU/13VuSbOBILC4uzDUuOLCwsLMYgSypyxJitc2RhcT7BkiMLCwuLMUgMciSsIdvC4ryCJUcWFhYWY5Bmmf6bc7GJI7GwsDjbsOTIwsLCYgyyvGo2a8NqFhbnFyw5srCwsBiDLK/S922dIwuL8wuWHFlYWFiMAS0qtUhwS44sLM4nWHJkYWFhMQaFEUqzniMLi/MLlhxZWFhYjAGllVpklSMLi/MLlhxZWFhYjAFllVpkuZGFxfkFS44sLCwsxoAZobRTKUeCC2vYtrDYYrDkyMLCwmIMTJ8RF6t7jj73oVvxf373u5YgWVhsIXibPQALCwuLxyJ4TTkaT44EFzi2vwsASPo5JmZbZ2NoFhYWGwyrHFlYWFiMgWE5WjVbjVEjo224uNFDsrCwOEuw5MjCwsJiDIQRShOrhNW40WJE0GLsMRYWFuceLDmysLCwGAPTQbSa5YjlRv81SscfZGFhcc7BkiMLCwuLcagpR+MPYVmu/+bU9l+zsNgqsOTIwsLCYgy4oR2tGlazypGFxZaEJUcWFhYW47AGzxFNU/13sbyy4UOysLA4O7DkyMLCwmIMBIR5YyxYklSHFNaQbWGxVWDJkYWFhcUYiFpYbfwxLK6UI1bYsJqFxVaBJUcWFhYWYyBERY5Wq5BNa8qRNWRbWGwVWHJkYWFh0UBZ9PH0ZIelRraaDatZWGwZWHJkYWFh0QArOCCMMNlqYbXUyFazypGFxZaBJUcWFhYWDdCCwVSO1lbnyHqOLCy2Ciw5srCwsGiA5hzCUI5W4UagWRVKs+TIwmLrwJIjCwsLiwZY3lSO
Vmk8a5AjQfnYYywsLM49WHJkYWFh0QBN0rrnaBWwvDqG2fYhFhZbBt5mD8DCwsLisQaapQCMsNpqnqOCgaa3AiKFYBecncFZWFhsOM4p5ei6667DK17xCuzbtw+EEHzuc5+rPS6EwBVXXIF9+/ah3W7jec97Hu6+++7aMVmW4Vd/9VexY8cOTExM4JWvfCWOHDlyFj+FhYXFYx15FGN1p1EFmlHQ5FrQ9F+RJvHGD8zCwuKs4JwiR1EU4alPfSo+8pGPjH3893//9/HBD34QH/nIR3DTTTdhz549ePGLX4zBYKCPedvb3oarrroKV155JW644QYMh0O8/OUvB2NWErewsCgRD/q126sasgsKyEra1FbItrDYMjinwmove9nL8LKXvWzsY0IIfPjDH8Zv/dZv4VWvehUA4GMf+xh2796NT33qU3jLW96CXq+Hv/7rv8bf/d3f4UUvehEA4BOf+AQuuugifP3rX8dLX/rSs/ZZLCwsHruIoiY5Gk+PirzaVFnPkYXF1sE5pRydCgcOHMD8/Dxe8pKX6PvCMMRzn/tc3HjjjQCAW265BUVR1I7Zt28fnvzkJ+tjxiHLMvT7/do/CwuLrYskGtbvWEU6ogYhsuqzhcXWwZYhR/Pz8wCA3bt31+7fvXu3fmx+fh5BEGB2dnbVY8bhfe97H2ZmZvS/iy66aJ1Hb2Fh8VhCGie126uG1QxyxK1yZGGxZbBlyJECIaR2Wwgxcl8TpzvmXe96F3q9nv53+PDhdRnrVsX8g/tx97Xf2OxhWFg8YqRpcvqDADCjthHnts6RhcVWwTnlOToV9uzZA6BUh/bu3avvX1hY0GrSnj17kOc5VlZWaurRwsICnvOc56z62mEYIgzDDRr51sPVf/FHWDx0ELsvvQw7Lrp4s4djYXHGyI2eacCplCODHDFLjiwstgq2jHJ0ySWXYM+ePfja176m78vzHNdee60mPk9/+tPh+37tmOPHj+Ouu+46JTmyODOkw0Ht/xYW5xqyBjlaDdRQi6znyMJi6+CcUo6GwyEeeOABffvAgQO47bbbMDc3h8c97nF429vehve+9724/PLLcfnll+O9730vOp0OXv/61wMAZmZm8KY3vQm/8Ru/ge3bt2Nubg7veMc78JSnPEVnr1k8enC5SHC7WFicoyiMhrKnAmdi7N8WFhbnNs4pcnTzzTfj+c9/vr799re/HQDwhje8AR/96Efxzne+E0mS4K1vfStWVlbwrGc9C9dccw2mpqb0cz70oQ/B8zz87M/+LJIkwQtf+EJ89KMfheu6Z/3zbFXkabmwFPnaFhgLi8caaF7Ubq+Wyk+F9RxZWGxFnFPk6HnPe96qDSCB0ox9xRVX4Iorrlj1mFarhT/5kz/Bn/zJn2zACC0AgMmFZeW4DatZnJsYKei4yrTDefVAxq1SamGxVbBlPEcWjx0IuZtu7r4tLM4V8GJtRIca5MgqRxYWWweWHFlsAMqFxZIji3MVzWrXq+nV5lHMZqtZWGwZWHJksa7gRmiBFpYcWZybEHRtRIeZnqNThPwtLCzOLVhyZLGuYLTyarA1ZvxYWDzWwEf8Q+OJjxFVq/mPLCwszm1YcmSxrjBbKGQnT27iSCwsHgUUORLlFLka7TH5kBWOLCy2Diw5slhXMCOUxqznyOIchTJXO1AlPlZRjoz7bVjNwmLrwJIji3UFzauwmvUcWZyzEOV1THBq5UhYcmRhsSVhyZHFusIs/Jjn6SaOxMLikUNI5YjoKXI1z1F1/6lqsFlYWJxbsOTIYl1hpu+nebyJI7GweBQQpefIEaeZIompHG3kgCwsLM4mLDmyWFeYYTUzc83C4lyCKmRahdVO7zmyypGFxdaBJUcW6wqzYadtPGtxzmKtypGwniMLi60IS44s1hU0yfTforB1jizOUWjliKg7xh+Gqgik5UYWFlsHlhxZrCuKpDJhW+XI4txFyXQIOcURjNXJ0UYPycLC4qzBkiOLdUWWViZs24jT4pyFloFKdjTOc8TzDGZ3Nes5srDYOrDkyGJdkSeJ/lsIS44szm1UypEYIT8sq5MjCwuLrQNLjizWFZlJjqxyZHHOQobV1E0CoBEm5nmmjdvlM6xyZGGxVWDJkcW6osgqz5ENM1ics1DXrmRHAmKE7LMsg7BhNQuLLQlLjizWFXlqkiOrHFmc2zDDak3liOWZbjMij7CwsNgisOTIYl1RGIZsu5O2OHfRzFYTEKxO9nmeo2bItvTIwmLLwJIji3UFzY06R3axsDjHQRwjW43XlSOa1j1HFhYWWweWHFmsK5jReBY2rGZxrkKqnkTPkAKiGVZLGp4juxmwsNgysOTIYl1BDXJkFwuLcxeKHBlVIFlTOUoaypG93i0stgosObJYV1Ba6L+t58ji3EV57TqKHAk+4jliSQbAMGTby93CYsvAkiOLdQUvqHlr08ZhYfHoUDKdhPLqdsNzlCeJrXNkYbFFYcmRxbqCUTO12ZIji3MbmVSLxJhstThJUK+QbcmRhcVWgSVHFmeE9N57sfLpz6waMqs1m7VxBoszRH7wIJY/9SmZJr+ZEPK/orrNaO2IJI5RJ0T2erew2CrwNnsAFucWHr7if2Bw/xE86QmXo/O0p408zpnZpdwqRxZnhoU//EMMvvZ1+Lt2YepFL9q0cSjyz3W6moAo6oQtMQqeyiPOxtAsLCzOAiw5sjgjfNd9Nvr/9klw77kfTxtLjswwgyVHFmcGurgEAGC93iaPpCQ6FICrbrOidkSeZGOfY2Fhce7DhtUszgiZPwcAWDjZH/t4vf+ULZBncWbgUQQAEEVxmiM3Gko5Ivp2c0xZ47ZVjiwstg4sObI4I2TsYeTDL6AY2TWXELzuwWg267SwOBV4XLafEQU9zZGPHIe/v4wvfuR2DJbTsY+XIbXyOmZGWK3pOSryJoGz5MjCYqvAkiOLM0JOvw9ePIDB8sLYx0c6lzOrHlmsHRU52jjl6K7rj+Lhu5bw0G0nxx8g9H9ADeWINTxHed4gcLYivIXFloElRxZnBCHrurBVdva8kaHG6WaHR7YOiuPHkT300GYPY0Ohw2p045QjmpXXcBaf6houiQ6t+oeMGLJpUSf+NqxmYbF1YMmRxRmiXDRYc9esH64vEHTTvSNbBwdf/x9w4NWv0QRiq0FQCpFl8u+Nu25oUV7DWbzKexgCEJN2bKDRNxAAZU2lyJIjC4utAkuOLM4IKj2frxIua9Y/opklR+sBkeegx49DJAlYt7vZw9kQ8CTRf29kWK0iR6dQjmSIjKHqrdbcEDDaJEM2rGZhsVVgyZHFmlH6h0pSxOkq5Kixe87T8cZtizODSRw2v0DixqCmiG1gWG24dA+ywacxWFrNN1cZsqlTTZF5Vjdws5FkA95ISDg/cOvCrXjjV9+I+5bv2+yhWFisGyw5slgzzF5SzVYKGg3lqIjHZwRZnBnOlqqymeBxjMHkhXjo8f83inTjjPzLhz4NQY/iyF2fHPu4MAzZTFRTZCTN4gqcNYmQWHXTsJXxze9+Gpd95rv45m1XbfZQLCzWDZYcWawZaRx
BK0erpOg3w2pFFI89zuLMwI2FecuSoyjGg5f+FA4+/idxPJra8PfL4+Nj768pR8YU2RsmjePK/xNRht6EYOBs4xSvxyouvvp+PO+eJ2LX1+7a7KFYWKwbLDmyWDPi/gC6ON4ayVEeJ2OPszgzcINkii0cVktb2wEAG8r/iK//HNcjsAwfK3JUGbLjqK6CchlCIzBbjGxN4noqRNlTcftTfxVJ95LNHoqFxbrBkiOLNSMZDvTfqxd3bChHiQ2rrQd4svWVIxZFSMNZAONCVusHx53Tfw+WRmsdUYMcMVJ1WIqzun9OSMWIaNM2Py+VI05DAADLgk0eiYXF+sGSI4s145GQI7pKJW2LM0MtrDZSmXlrIO0l4G65wI6andcHQoiaLe6OX/918AbpMctPUOIBkvwkSV2xU6oTIRU52sgSBI9VCC4/f2GXE4utA3s1W6wZybDKJlqt4N1IWC21ytF6QNQM2VszrDZcqa4VlW6/3ihDYZVp+sSxY0huu71+DK3OL4MHNU0mWZMclf/XypHg4PT8S+cXQqprwj/1gRYW5xAsObJYM0z/0DivhnykdssqR+uD80E5inrV5xpmww15D5ZznXEJAN12CBLUF3VqeLoo8aGUo6xBjhQ7cpxKOWJbNOR5apTkSIhwk8dhYbF+8E5/iIVFidxIy1+9VUKzCKQlR+sBHidgBBCEbFnPUTSsSMtGeY5owSEM5WjQckfOZ2GExpgRVsub510pR44j6z/yLavqnQpCm9at58hi68CSI4s1IzNDO2sMq9EtqnKcbfA4xo2XX4jMc/G6ZGuWRzCrPnBBVj/wUYAWrKYcMZegt3QSE8YxjFaEngoXihwVjQrZ6konjtGc9rwMI0vliFjlyGLrwJIjizXDVIHEqq0S1JLhAyhG+lFZPDIU0RCDdrn4mMb4rYQorQjRRjW4ZwUHUCc5aVQP4dGiEVYjBBBj+gTKQRKjivb5eL0LuYxwpwUhhGFQt7A4d2E9RxZrRpGaE/9qYQ9VGa/0cRSrLBZfeuhL+OPv/fEpvEsWJjKDENEtqk5EeVVTSGyYcmR6jsr3YKfIVjPDarRopunLbDW3mkbpedguh4kERfR1UBS6cbCFxbkOqxxZrBmmUfV0yhGBB9F4jok/+M77sJT38IofeAUumbHF406H3Og7tlXViZQF0IlfG0SOSuVIkSMfQA7ePVo7xqxVlBvkiPFGaxCpHHFTOcq2JnE9FXJ6AIzdj9T3wQcDOK3WZg/JwuJRwypHFmsGzerkaJzqI1SoQZo0V1OOhlkPADBYObDew9ySyAxytBVN7kIIpOgYtzeGHOUZReWkLtVNXjSVo4ocMafyHJGR67281vt5tVGgzYy28wDK4M4JRdHvbfJoLCzWB5YcWawZzFg0BNgqWVN1csTGZO9wwZHJtS8ejO9vZVFHllZm+HHn9FxH9p0rwWttPTZmasqNiu1Ek6P6dcyYOr8EAo6uY9QkR2ojwImpHG297+Z0UMkZAgWi5eVNHo2FxfrAkiOLNYPXMs/YKv6CcsFw5KU1YmIFkOWVChLn/fUc4paFSY62onI0PHKodnujDNk135wkR1Ea1Y5hVG0CiCQ+khyNVO0ubzPjmHEhTyEEvvint+OLf3r7lvTY6cxVkWF4crQdi4XFuQhLjizWDGZ4MYTgEA1jcOnJUMpReWmxMe0UsnRF/x3nWzPzar1hFiCMs623wA6jeiHGDQur1ZIKSnVzmNaz1erkiEARHwfNUhUqnORATaXZGOKaRgUevnMJD9+5hCJlI4+vFUIIfOvv/hp3fOOrj/g11htc8Mp/KHIkS0ubOyALi3XCliJHV1xxBQghtX979uzRjwshcMUVV2Dfvn1ot9t43vOeh7vvvnsTR3xugRfmxE5HelJlRYrKkC130nS0EWeaVNJ7UmxMJeSthsRoS9Gst7MVMIzquSGCuKsc+ehANTlyAfkerNEstrotlSOiXeJgvCJHVVitIlDHF+sqFACkQyP7jT1ySaw7fwy3fPEqXPeJv33Er7HeKHgBkPIzCZEjXu5u7oAsLNYJW4ocAcAP/dAP4fjx4/rfnXfeqR/7/d//fXzwgx/ERz7yEdx0003Ys2cPXvziF2MwsOrFWsBrEztDMawvBElmVPGTawgdR47SHvyCoJO4iPPRxcRiFKmxKGfFFgyrxR44WwaY+mwbQ45yTegrLxFvXKPmbVFTjgQoN5Ujla3m6tc6tjQaJo6OV2rKo2mnk8kWMlkcgTcz5zYJGc0M5ShD1rebHYutgS1HjjzPw549e/S/nTt3AihVow9/+MP4rd/6LbzqVa/Ck5/8ZHzsYx9DHMf41Kc+tcmjPjcgWH1Cjnr1zJTYqNysgiIrw3TEZ5FmK3jpd3bjVdfuQzSwnqO1IDfCTNk6p/I/Fnwwx+cPI+9/FCK9GQAgNmhqyiQ5KclM+R5NckQp08dwo6AhaShHagfAjdc62U3QRP/2e6vXjh55qv9KryIeZp/DzUSaZ4Aoz58QOfLh+VfKwGJrYsuRo/3792Pfvn245JJL8PM///N46KGHAAAHDhzA/Pw8XvKSl+hjwzDEc5/7XNx4442nfM0sy9Dv92v/zkcIXl9Eo+XF2u3UzARSzxEcRaNPVpr2MTfswOMOksXHxiT/WIepN1C2fqrBDfsX8Yz/8XV89a75dXvNR4IkLhd+zk4AMPt1rS+Usd0RFaFphrq4EVZjMAzZQEM5kscbx4xrPDt84LD+m2WPvJ3OwROVVy+LHxsKTZok0HWjRI4iPv+y9Sy2JrYUOXrWs56Fj3/847j66qvxl3/5l5ifn8dznvMcLC0tYX6+nPx3795de87u3bv1Y6vhfe97H2ZmZvS/iy66aMM+w2MZvLGIJN162m6aGUTHUcZsjpTWF/M078ORi0yxdG7VIV1MFvH2b70d3zn+nbP2noJSUEPBoHT9UrmuvX8BS1GO6/dvbpaRInyCl4v+RnmOMnmNEgGAqKSBuvmbs6qCthlWI8AqypGR7j8m3DU8VJWreDTkaLFbESIVYttspMOBUXGcokgeG+E+C4tHiy1Fjl72spfh1a9+NZ7ylKfgRS96Eb70pS8BAD72sY/pY5p9f9bSC+hd73oXer2e/nf48OFTHr9V0Qy/JL1u7XamlSNHp/cSCGRFfTFP0z6UXyPtMvzrg+dOhss1B6/B1x7+Gv7Pvf/nrL0nTxJQo0UFG0kpf+RYjsrFOi02KHd+jSgkIeFCLvrE3ZBwX66VIwDy9QvWJEerpPILAWqce13fxyBQaHw3PI6RLBqtX7JHbqZfMshROnyMKEeDni4CCQBFvrnXkYXFemFLkaMmJiYm8JSnPAX79+/XWWtNlWhhYWFETWoiDENMT0/X/p2PaIbVmhN0pRw5cvUpF5SsoRxFcbVYFBHHr/6fW9d/sBuEhXgBAJDS9fVWrKQrONQ/NPYxHiegxi/10WQ8NdGVYZDmd3S2oQgfRwIhODhxgTFm/keLQhqyiSiJOwAwVp8GlUJKIDPRDO40TjkyQ29oKEfJ7bejcKt2Gjx55AkIXcPsHD1GyFEWRd
pzBABFsfn+NQuL9cCWJkdZluGee+7B3r17cckll2DPnj342te+ph/P8xzXXnstnvOc52ziKM8hNCrzZXF9os8jRXoccKImSYGsEQZKDeO2nxO9QJ8LWExKn1XO13fMv3TNL+GnP//TWDFqQCnwOAIj1aLDxvheHimW5bnfbOWoUmQEIGII4q5Sgf1Rvo9Sjoz7mqSfMfW+9TpHq4fVqmw1IuptdeKbbkbhT1Tv/yiM1N1hV/+dRGcvy5OfoiJnEQ0BQznaZI5tYbFu2FLk6B3veAeuvfZaHDhwAN/5znfwmte8Bv1+H294wxtACMHb3vY2vPe978VVV12Fu+66C2984xvR6XTw+te/frOHfk6gGebI47p6opqjEjgQjkGOGgtvYiwQXlGaXNdzwd9IaHLE1pccHR4cRsELHI9G26mIJAEn1apzqsXqTNGNSyKw2coRN8NVfADhuBAboByp3meOgK5fxEfIkRoLqbUGGSFH8k9umMcdwWvHxLfcgsKryNGjaS8yjCuPX3SWyo9cd+Q6/NinfgxXH7x67ON5ktSUI7ZBxTstLM42zi037Glw5MgRvO51r8Pi4iJ27tyJZz/72fj2t7+Niy++GADwzne+E0mS4K1vfStWVlbwrGc9C9dccw2mpqY2eeTnBprkqMjrNVtypQiRBjlqGrKTaoHw5GM55WgHG2PCXU+cTErj8nqSIyGEDtNFxagiwKKoQY7WUTmKZFhtk5Uj8zMJPgAncxuiHKm+dISLVckR16n8gAAB0QG4Zp2jSjkyfUkFE/DkpUwXFkDnDHKUPrJwrBBCm8kBYDDoPqLXOVPcPH8zYhrj28e/jZc+/qUjj5clBapr05Iji62CLUWOrrzyylM+TgjBFVdcgSuuuOLsDGjLob6I9Af1ib7Q4TIC7q4eVssNU6ojza/nCjlSylHB12/hppxqc29cjGYh5YM+zHO/XuSIMo5+Kg3Zm6wcmaEtwYcQZOeGKEeqebIDAUVomqeTN3urEXWLrBpWqwpFcuSMoy3VJMEYCq8Dlt9fvn/+uEc07qUoh2MQ8sFgNPy6EchkUc7BKm1+iqTRQmjDR2RhcXawpcJqFhuLpnLUG6aIDKJDU1VgrzJkA3yEHBV5NYWqCT9bx9o9GwXKqfYE5etoyE5ZitmBwEUnBWI6So7SlW7t9nppPL2kAOEMe9PjyDe5m7wQ9bAad1yIYv3JkSI+RAjtE2qSI7PYqek5ghCgsmZX+VtQhuyK1BMIFKZhnlLkno8i+jKK6MsjoWgTK/PHMFhaHPvY0ZUEvqgIeRydnVprpyNHeaPiNydkQxQ/C4uzDUuOLNYM0VCOiAAGqUGO1AIrCLhTGWyzok58coMcEcHg8QL5Otbu2Sgsp8v6HGTx+tUFyliG376S4f1/w5AuLow8nvbrlcjXy561Ehf4ocE9eM3xz+HCI2evbtN4NJUjF6JYf8KmahgRCIPzNMJqOlttdc9R2XBVkiPiGOVA6uSIMQFKMpSUlqNYJay2fOwI/ubX3owr/9t/Gfv40W4CzyBHaXR2PEdKIV2NHNGGckQdgD1GMuksLB4NLDmyWDsaRmAHQGQ0QVVmUwKiPUdiTFiNNvwtHRqNHPNYhPIbAUDB10/VSGmKPSuAxwF64sTI43GjInuTpD5SrMQ5npk9DAB4Yv/gurzmI0VTORIblMqviI9Te7/mMfJ9BYEA0d4kIaqsutIUr5QjT3M7R4ga0c/hQ/CK3BartH654ZMfBQD0F0+O7Zt2rJvAN665PD472WqnU45o2uhLRwS4JUcWWwCWHFmsGdWi7Kg7kBgqEMvLXSYhBEIrR6NhtaaXeUYMzgnlaDGuQh75OmaMpckQvjyN+Zi2EFG/Hmpbr3deiXJso+XiF9LNDoWY3e5VWG39xyS0csQrQ/ZIWM1QfszeaobniKMiR5S4+rWaylEh6uSIjSFHg+VF7L/52/q2Ck+bOLKSwBNGynx6dtruKHJ0Mu7idf/721ho+AxpXidHzBHgtpG3xRaAJUcWZwA16fsAyjCD6TlSZleISjnCmCKQorE+bGPDc4McJYvwKMHT7p9Be3DmuQxXfvcQ/un2YyP35/3KXFuMKRIYN5qVCrJ+ypEj1QixyV3eTeUIPAIH2RBDtjJ+E3AdCmuezXoRSKPAI6reaoxR/UxqZGgRIZDT6hULBDVyRPNRwnfLF6+q3abRKEE+2lCOeDZKoDYCfOjgmYf+bzhDD//60CL+5YG6J4o1yBEHAxtY5cji3MeWylaz2FgISY4I8SFEBgggNvxEXO70CUrPkSuf00wTF401b5rGyNex6vNG4WRyEpccm8BTH9iG6chfU+sZheUox2/+450IPAc/+ZS9cJzqefmwh1D9nYwuLElcXwjXK5G/N4zBuOppRs/o86w/eOPv9FHVBFoNioQ54EZ35MYxuuaSDKnpU1LV4yoLRUpy5PhmEe2acpQ7IQQzyNEYwrf/u/9au10Mh8DOXbX7jq4k2Gs8V6SnPjecMxy683bsvfyJCDsTpzz2VNj2wOPwpKNPQ0A9HCN05LfMGrc5YeBnsUClhcVGwSpHFmuHUORIpimDIM6MsBqtFhWuE3hGPUco6pfdFEs2vc7OWrCYLGIqLvcTQeGAnoHv6OSgJDg5HW3EmxuGa5qMZqslKhRDyjYUAsJYwB8FFh8EU8qRoJtMUBsEmg9AH0WT1lWhr+EqrLaactQEAamUI850mhs124eIRljNadfDamMy8PIGmRinHB3vJfCN13VOc272f+df8dn3/jdc94m/PeVxp8PugxRC5Lj8+CSIm4xcI6KRZSpAwceoYwqcMgy7Nuxm8diHJUcWZwAVblDkCIgNWV0YxfOYy+UxfLT6cqOX1SSNkZ8DqfyL0Twm0vKzu4ycUQsRVWwRqPu0AKAYVIZrPoYcZdrL1QFQhtXWw4/TWdoPwUvSxgXdVFO8VnRkdpjgg1XNy4/8PYQmNA4xwmojypEKvakCRxWJUj3gymKS0pxNXE2wCESNQORuG4JX3y8dU0+KNUJkRTSGIBcMrjFQIgB6ivPTPVFWWl88/PCqx6wFpL+ArPsRBPEyiJOOhL8ZrZ88IXLQdPy1ufBwH3/+n38T/+stv4j5h44+qnFZWGw0LDmyWDOqsFp12cTGQi9olQLNVqtzJARIowv6ZBGfE56jk8PjmIpL9WYinzijKtlm/7ikUdqADvu4d88cbrpk70hqNADkKpzitAFI5WgdiEN7+RgAuZDxAmmxsQS1f/U1OPCa1yI/eHDMo+X377mSHIlipMDgowWnQneQd2EqR/XrcTVVjgC6zpEZViuIZ0beUKhaSJwj8UPUeo+NI0esribRMZloOeXwGs7xZm/D2vHyscHy+LpJawWXdbcEGwBuMkKgBWsyy3zs9/bg9xbwD//zu0gH9wOgOHTX/kc1LotNAiuAf/hPwNfevdkj2XBYcmRxBpATu1PtpM1U/mrHDVBNjlg9ZFYkIFxddqUK02LZOZHKv5QsYkaSo07WOiNytBIbNWoaJIQNIxzcOYOT0x3wwajRtlCLp1Ma4U+nHHEu8
O3PPYhDdy+dckwdwwgOcMTxxpp8u5/5DNK77sLw+hvqDwgBRY4cR10bDPk6K0e0YIDM+HIcbiSYNeocNcmRoTCN8xzlpG7dLNS1zBiSoD7F8mYYSggdUHTk+xZxXTliXIBzAacxzmy4engqk68RrSyPLQ2wZui8igLESU9BjhSpzVCMKXT50O0nwYpFqDkkG6OOWZwDuP1K4K7PAv/y4VHJdYvBkqNzFNfdfxKHl8/eBMMY1wsLqTL5ayEineVDCAqzfYi5kBexDqsRp+xp5/HHBjniguOfD/2zbhFiQgiBk1kPgfy8HuNnFFZbMZWjvFH3qdcDl6RgnCJUyMWNqWPAT6kcHb1vBbd89WHc8A8PnHJMXsPUG51isTWxdHSI4w/2Tn9gA8XxMtQjGj35eJ5CrcKuK81qgqEYk9L+aFDW15LkyBUQqyhHKre/mRRIQMCEIkdMj5nDqZm2ledIUIqs0RGHNYgKM0zWnnweS+pp+jnlcMB1mM+TBGtw771jP+d3DyxheaX8fjhjiLvdscedCThyuCQaUXgVOXJUlXCRa2JmghUcnM3r26k1bZ+buOuz1d9ss8t/bCzOOFvt4MGDuP7663Hw4EHEcYydO3fiaU97Gn7sx34MrVZrI8Zo0cB98wP8P3/zXfzIRdvwuf/vvzsr75kXBdTCIjQ5EogMQ7aaKAmAwlhZMjPdNx+CcNmkk0xCoAvCs8dEWO36I9fj1775a3jxxS/GB5/3wdpj/bwPkgndghSCI6drX7xXotXDannfMOBmo4ZdJrUF6goEkGG1UyhHy8fKhSfun2J88TKKIqjdNRycfsESQuBzH7oVeUrxn37//0LY8UeOySmHeuW0YGjJ5xXHyjIGvFElmmXV+3qqYyvYunuOWMF1qqTrEmSMy+ayDd+MDl+p61TeD6NCtpHKnxGTAVWeI8EYcq9BhhphKGZ8jz4c5ACKRg2jnPFaGn9YMFDXRf/OO4AXvWTkc77hb27Cz8Tz2C5vD5YXMTm3feQ4PWIhkAz66EzPjH2s/FQJZrPuKDmSNx144CgAUMTjyBEVELQiR8nCaCV4i8c4ki7w0Leq2zQBvGC1o895rFk5+tSnPoVnP/vZuPTSS/Ff/st/wec+9zlcf/31+Ku/+iv8xE/8BHbv3o23vvWtePjhR2cAtDg9Hl4qF5Nj3bNTCA4A4iSB1tiVLwQCiZmZIh8mqHuuC9Nwmkcgsi6MJ4wQymOAHN2zfA8A4OSY1iBLyZI2YwOlepMXa9/9Lp/Cc8QMUkLy0RAI1+RIZpaBnVI5Wn64rLKdRcVIx3n9mvN3Y0Ana/fFa1COipQhHRbgVCDqjh9D3zDkLsowIVtZgZCkSDRS9KnRbd71JdkS7JSG40eCmnLkOEZ+3Gk8R4odkcpzlBvXdOG4tbIAynMESlGQ+mdgjeKhzCi+mbR2l/eNUY5U6xDCBQLpWxref99qH7XmR1qtX5vCDVd+HH/+5l/AobvuGPOo6iWX4qJ4YSRxQujffPWDj6LReYkVDJxV1d+zXveUY7J4DOKeL6CW21mcvfVnM7AmcvSjP/qj+OAHP4hf+IVfwMGDBzE/P49bbrkFN9xwA77//e+j3+/j85//PDjneMYznoG///u/3+hxn9dQmU/xmIV0o5AYky3zVDhCIDN3ucaOmxv1cszwCM8GWjnymBF6O0X6LwBkDx3Agz/5cvQ+//lH/iFOgyOH9+Pl/7IH0/eNKcRIY0ykptDKkOdrL3bXNTxHI9lqxmJCKMdvX3Vn7XG1e1fkCOBjlSMhBP7pw+/HbV97H9Lun4Em30EWjz+vSf8kIlpXeqM1pFgnQ7O/1yqvbaiJi1H53RdHq+KXImsqRwY58lQmJBupvvxowQoOoTxHnquv0SZ9NL1zI6+hPEdG3zfa9BwZyhFDo7p5g3hRfS5cCFlcNW+Qo4Jx+LLHmSs4XPkaSW/10GbbqLQ6PA05eviOWwEhsHj4YO1+s7kuADwu6o/WLJM3yySN8rtLxmRcFnkOwapxFNmpF1bOGT773v+Gr/75h0953FbEHUe6eNEHr8U37hltJbSpuOMz9duWHAH//b//d9x88834lV/5FTzucY8beTwMQzzvec/DX/zFX+Cee+7B4x//+PUe59YHZ8Bn3gB8/KeB9NQdt5UKEeV0pGnmRiExpHLqlYuBIAI0re7XEyUA6laXllnML0u7cJRyZCwU2Wr+EiGAb70fg098GPmDD6L35S8/2o+yKqY+cz929EJc+u1xvp8CncQMn3Bk6drrtZip/M3SBub5cRnBP9z6UO1xZdmlOkTDx1ZIzpME9//r9eA8A0QKlt+NdDiewERRhITVF/XByuknY5MQrUaOhgapWRqUn02F1IDR6s6l5wgA3JrniK5z+xCaMyjliLiu9hqNkKPmb8og+qrOkVnpmjXCapXniIFDfdbyXDeVPKp/Vy6IJFlFwxhfKkcyHMgFQMrXzwpj09F4XZ9Vj/VPQY4E51g+ekR+pvp1n/McwtDXtiejtbBUSJIAgBz/OCN90jsC80wX2akzEbvz8zh4+/dw97e+PmJi3+r453sX8MDCEF+68/jqBy0fAL7w/wKLp/YVrifuufcorlt4fOXDXgdydOzkCXzgg5/Et++47VG/1npjTeToJ3/yJ3Hy5Nq6kO/YsQPPfOYzH9WgzkfwvEDvq9/Ewj/eDBEvn/LY5WE5+QhRV4+Wj0d46Lb16xZvwvQRFHp3L+r+CPmrIaQyDwMAMwrWpWkXjt5tGsXyVttJLnwf137ng1j8zhfLt4g3ZreycvwovEz6SZxRwlmwArODurs2i09NYk3UUvkbyhHPTFMuQcaS2mKnFqDCrY4rxu3OGwuOEBnS/vhFaDBI0Ky7GfVOfe30Ty7goVu+o8nDauTIrH11UilHx03laJWwGvHgeYqwMUlm1g+UGkkFrgO+SjFw0SAa2nMkzDpHBjEhlSFbAFWI2PAlqSwGJprkSH12F0p5yZp1jxiHr8vK+8j9st5VboTkmqQl4GtTjgbLi/q6KRrvm7O85seayjEa/hbV/4gkgDkdQ46WDtQ/U3HqkGncrTIpszHX+lbGUDbzjcb4DzVu+ivgex8Dbnl0RT7PBNce2ombli7CkVh60+ijn4u//LUb0L5/L6774t2P+rXWG2v2HF1wwQV4zWteg6985StnTa04r0AIjv1rB0v3ToEeeuiUh5r+FfMHdM1f342v/MWd2pC7nki1H8VF5ipyxMEM5QimcuRURIIb2UlZ1tdZQAQUakddDMf/0A4v3Ytf2bMLh4ZlyIEnG0OObvjHT+m/h5Oj/qci7WF7vx6GSpO1K0dmKv+I58i47XACOFnjGJni7RmvMYaY5YPGfSJDstwdO57eSgEh6sQpGY4/VuGa//0nuPEzfwSW3lQevzieHA6zUytHzbBa5Tly4PgVORpXTfrRgOWV54i4nhFWG2/I1txpjHJkGqkpcWoxOF3niDGtvKjMuKbniEbV7wpSgcobhvWMcnjSkJ2H2xFPP63826iPZFblJoIjENX4Bsurl3RYPnK4GksjizBjmSaT
ABAWzqrkqHzjcvzpmHBoHpVmbEeUvyF6mmSGYbfaIGbneGbbIB+gm3bXfLwqjzI8FTlalmtEdvaqjadF+dt8aFG2tlmDcjRYTlf1PQJAT/Xhi0YTOzYbayZHH/vYx9Dv9/GKV7wCF110EX7nd34HDz744EaO7byCE4YId7hghCC5c5wxsoIZoomM3fVgqZxUhyvrWzwPMFNvHSQ6pZyBmhO5oRyVndXK48x2AknWA1Hk2qFaihermIGPD46CCIGdy7KD+gaQo3Q4xP4brte3nTFzUhEtYCqt/4DTeG2eI8o4eskpyJGx4DgiAGmQI6FrAFULSjIcXTCyFbnbJqpTm8BwcfzCGA8B8IqUAEARn3qiffiOW8vPk94AITjiE92xxyVZ9XmWlHJUC6s1lKO0Cqt5vsx+EetPjqjhORKupwnLCE4RVtOeI4NIcGMjYLYPEQVFtWOQ12/jtYtBVz5sKkf181MqR/L6IT7gljt3aoSlCyMLLmiUmBgsra4ILsmQGjAaVkuzDGYBS5e5o3WOKke2VsfyMf3juBy/j5IcsdOkgUcrhnI0pp3KuYKCF3jtP70Wr/jcK0qyuQYMpWdPKUhjsXJQvsHZ8f0IRnU04GBvTr73qdeZo/ev4OP/9Ubc8PerF/yMpHLqpo+9rLc1k6PXve51uOaaa3DgwAH88i//Mj75yU/iCU94Ap7//Ofjk5/8JNJ0/Rfk8wk0z/HlvRfj6h++FP27xtcvUTDTwpVyxBlHnpR/Z8n6dzPPZdE2AgcJKuVobHdwAjBhkiPDb5MbCzARWooXYxZ7AOily5jrAy1VyHm49lDWWhF1l2sZSmEx+kMt8gGCohlWW5vcbxIjAEgb4SImzEW6A+Lk9dCbUhu8DOqcxtEokVEZQIS0oRba4dL4EC3LOIQoJyYHZWPScX3dTJgNTHlxP5Lu+OPNwqBLwxyLR4b4evoCLOx4avlxmmEjqSQR4sIL5PUAVlPU1gPl61XkiK1myB4hR9WfWjmipufIMYoiGb3VGNVtUdRXPEKOhqZyJMlFM7xFOTxpsCbE1+S3MD08hnLURv16O1UhyOWjpnJUJ0dxmkCg+i4Jd8dklapUf91sZaRcAQBwSUp9IhXg05GjnkGOTlEJ/FFBCOBTP1f6PPONCd3dvnA7jg6Popt1sZScuiirwlBmew5WU46EAFZkVnhxZuMe5kP8zOd/Bh+46QNn9Dxz47TsBqCMrPrePMvw0Ktehfv/oqyJtHRkdXKbJOU152WPvTJAZ1wE8qKLLsK73/1uPPTQQ7jmmmtwwQUX4M1vfjP27t2Lt771rRsxxvMCXhBAyMrTyw+cuhzCkkGOlPSaxdUPKYsp0oLhnf9wO665e37k+Y8Euax6S+Agd6sFrCbFG5tIAQ96VTEUgDQfVMoREVCLOFllAuynK7hwyfDfnIoc9Y4Cn/0l4Mgta/5cAFA0iL1LnZFifXmRaK+Uvq+RsizyHMf/27vRb5jGzQKQwJj2IahIF4E7ElZTYR/HSaAzgsbspjOVvUR8gJQEL1rpjhwHSKIgw2oeZM+29DQZRIYxlqbfRbKKn8kM9S5FGW766g3oZjfioQt+VL5PI6yWVKZlL6iUIz6m1cajgZnKzxwfgqwy/RnXMQDdgw0gVZ0jgwAyOLX+a4pAlBmFkjxoT1KdOOSKHBFXH9s0ohdMwIc8Z8QvCRJMTceoyg1gkpRjC6dmQBznlIUgl0xyFNez35Ik13WhAIATgryh5gn9+QjUGRtnu1CKnS/JIhOnU47OQlgtXgbu/yrw0DeBq9+1IW9xw9GqGny0xtIfqnbcqp6jaBFQr3WG5OiOxTvwQPcBfOXgV87oeflypT5yB3j46CxAx//+s/0PIPv+PVh5sFx7VsuYBQAMysc8Gqy7x/DR4lFVyH7hC1+IT3ziE/j4xz8Ox3Hwv/7X/1qvcZ13SNMcLCh3hL355VN2XR+nHJnm2DyhuPHBRXzm5iP4w2vuX5fxVQTCQUEqXwgvTHKkVhUCLjxo5Yia5Ciqwm+o+rSttjD3sh4uMPykPD2FkfN7Hwfu/Hvgu2d2HRaS4CkPChFsRALPBqbiVVb2zof1Y5I770T3M5/B/PveV1sgVhqTQ5McFWZYBi6IkzXKNMj0c59WvpQxJLFaaAN40m0cr5KezymDkGE1T4Y6kK1+bjljNcO3YIuIB+NTyevkKMf+274Gnt+Lnn8CzPFqSiJgGMmJCy9UO0gGthHkSC721AnAVBscUl/QRwzZRtFT3VvNUI6449aUI2WOZnkC4DTKkV74XR0+LRphqZwx+MLMelObk6omk2nInpDkyO9MYGJmGwCgf2L8JslUjrKTB2uPpVndc1S4AmGvoX4IVSWcrBo6LA8rX6clQ35cnNqQHRmG7LWGr88YZimOWz4K3PPFdX+L649W4fq1kiO14V01rNY1Ns9nGFY7EZUZqfEZkqp4sR6aPXByblVixiQRT1tl4VFz425ikA8wt1LNfXF/feuaPVo8YnJ08OBBvPvd78bjH/94/NzP/Rx+9Ed/FJ/85CfXc2znFYgg4HwfAKDrTSJfpZhm2j2BIK8mDuU5SqO6crQsydKRlXjNBvqMZXi4P/59qZT6CRzkRHlvBESe6d109WEALvzKq2HshNMigl4wHICI8sfhrBKW7RcDXGAqR0xArJbae+Ku8v9Jd/zjq0AtztwN5PA5ksakk/YV8WiDSFWGRvUxc7nQsZOLoHffUJZnQN0jBtTbhwghQB0zrOaAuFkjo6083vUqpS0bk7WX9gZy/L5uMZH0x0/IZQ2hclw+L0k5OYXHp54xVH7+1covxAb5WxrkYEPZvFSkGExdPFIhuwrnOPADSY4EA2+GcDgHFu4p/9+AEAIHb9+P/BTk2UzlLxy/VHwg1Q9e/05q0EUgic5Wq9QdSQpqhmxJcrLqWoejwk/1sedxtelQBIo1yREVjbBaeQ1wQrQKVxjj7xBZEylsoyWvkxNfGS2BEfd7SAwT/1KjanUSJYARViscjp29etaZUrvMumbj/Leq4W/bKccmxKlD/yY52jDlKI/QdRwsq8zaGz60ri8/H83j/pVqczos1kbyVFg6ytl4M7PyGwFnrBzNxyVJjuna1wUAiE/Ww/PzdGpVzxGT4f1EkqN0FXJ0PDqOgIf6djw4h8lRmqb4u7/7O7zgBS/AZZddho9+9KN4wxvegAceeABf+9rX8PM///MbNc4tj7DtIw3LCa47vRvpXXeNHpR0Ef7JD+Mr4W/CQz3dMzOUo+RkV/tcopyhv0YP0rtvfDdeftXLcefJO0dMqTRV5IigMMJALV5UqduiktiZ8LQLQRhZNWkRA6YUL7flJB+/0PaKqEaOgFOYshU5ys7Ml6SLVJIq7p2k9YmskO9JSFCGrQDQRio7N7qRJx9+DfDN9wKop/ED9cazLIpAjV+hIA5AciSSqJQLZfn5/cDRlYjzMUpbogmcj0Du1NNk/HnVhEQAPi+VCDLGSKugFihCPMApJ7RmvSYFUzkaZBSiUOQoRnfmMsTRSu14NRYCF36rI+/lEE1y9O0/A/7s2cC
17x95z29/7p/x2ff+Oj7zu6svcJlxjaUIS6+QejeDwAtD2XQdYoTVhPYcVWooqf1PQKCgsvRCFkOTI7d63IS+roz+bKxB/nPG4at6ScSHUo4YIdrzp94TADqS9DphG77MVuzPj9bMMVUjACNFN7OeQe4AUEKxa1AnR4rsMUM5aq65nFWKXYeo74Cfso5VLay2QZ6jIu3i1RfswUsu2oee4wDxqYtlninMkBpwJmG16nuIxhVCXTG+g0eoHHHBkbK1+4SbWa/LnTZEOp7sDRfnwYmDrDULAKAZK3tzNnB8eByuQY6Sc1U5evOb34w9e/bgl3/5l7Fz50586UtfwsGDB/Ge97zHFn1cJyTTMtusPYXkzjtHHhff+d/45Z3TeMe+EDMoL8xxYbXkZK9mAj6yinG2iYe6ZXrobVe/HfhfP15rLGju7lNSXdA+K0bq9gCkVI7U5UVZVRuHJtpgbPoUyCp1T3o0wYWNOYuP6+id9qsd1WmKaDbRj8qJ2CGV4ThqZm4tqYnA1Rl2PG20UjDCTumSDyyWu0YVVlNrrBlWi07M11QHASKVI6U+VOfFD6DJ5LimrFm//NwOHITyNfNVFqBC1p5y4MFTCVWnCOXqBYqEIFI5KlbZecZGhWxwAcHKcyd4jO62y8BGDNmGciTDagIMvGns/cZ7yv9f+z9H3vPEg+WCsXxs9TIYuUFeIxEajXwFBqYKaNTu8RwCosJvMHqraWJYqUrlQZUhOzfKXDirkSNDkSUyNNdcSApaz1YjSjlyCIQkV4VBqEJ5rJPl8OR1ko8JTanijzo01jjfWa/xOyICk0Uj8015iBy3XhDKAKMCSrGbDM3w/yphGUpritZGKUe9eAELnofMcfCp6cl1N2U/UnI0MMJpY9P5Vwx1/wzHrJSjMxkPAERd+X2oZADPBVsl3Lk0/yCycBbCKI6aj1GPjkXH4IhqLRn0HlsVt9dMjr797W/jPe95D44dO4ZPf/rTeOlLX2rsqCzWA9n2ckFOfRfDOxsZa3mEQzf/Bb7TbuH2VojQ6wKozHtpVIBld6OIrkbSH6JvkKOjK2u76JTs+/DKfmD+ToiekeYrF1MCgkJUlZV90KqcgG5SCTDhQy0cnuDaE5HxHGqiLFOgJTkaUzgOKOsfTcvh52qBGTepLtxT/X2GylFvWCoZruhAbfGj5g9fhoZAXJ1hx5vFHI1wUbrs6xokyiO2a6qcCEwyGZ+shzIEIdJzVE4mZmuRMKh8XMUY0qNCbQ4chG55XselVQNVWMgVnu4GPy5cBQD7Twzw+1+4VY1CK2ccHHRMRllkhOdmeAoov4xI0J26BCSrP0d5vkgtrEZHydHjnl393SiUmsnvq0hXaqURascYhUYHogUqJ29BBHr9McVMRUmOxqXyjyhHxkyqr3VzUffUSzfCaprkGoUkG+SibDyr1DUP6hrlhIAr8mMoRy2VNt/tIm2VNWniMYtTLMMfofwsjaEh75sbhJIQe6z6TFzwqno7KnLUpMyMch1Ge2gKUCej2SalOS6FjUrlj9IVvPY6htd9i+FTU5OI6fqSoyODcv6cCcvSC8M1tBuijNfKJYz1HZ0mrLY0zPCWv7sZ37xvtLmvUo6AM/MdDeXGi5COvi/vj59ns/mHkbR21O8bc/0dHx4HMdT63sn1z0R+NFgzObrjjjvwa7/2a5ibm9vI8ZzXcHdIcsGHOLFSb+2Am/8Wd4lqMmk5UjmSi2jSS1DEV4Pld+P4sS/UydEaG9SqncQhWYhvwch04nLBIwAyEkDN5L5gtYrIQBVWU8e4ovrBp5zqCZU7ribYhI0nR2I5w7E9z8YDFzwR+/fMIAq88WE1FVIDzlg5SlMVMvK1KhT16xMZ05+xKtbHs/pqIgw1J1kJIOQ4VLba3pl2+ZipHC3WZTFOCByk+pjCUDSCtq/DauP6jqnigQ4haAXyOM5GDMZA5WtxhQPhyPGsku79ye8cwq0PyB0nCbXnCiIfm85vKke7C3OB5WAuQRTsro9bh9Uq5Qjgphe4xMRO/ed1X/10zeumF1ARY/8f/Q3YcHQh0inyAugJX5MjAOgZRUhNcuK5jlaOACOVnzWVI/XSlXJU6zuof84N4qNCfQRwpC+pyQkLxtESRlhNK0eOTmQwU/lD6U8aDqewvP0p5ecrQjShCHIor7UmNy7UhgAuiCObFBtqcsELQJMjT5vSm4IiKziUd+n6yVirD1mzaKmE6TcCgHSDwmrR0gm89l8EfuZfBX78FgefCZ3Rwa+CA70D+OPv/TE+eMsH8eWHxrc0UkkdOyRRiOjpP0fU2DiMVY5OY8h+26dvw9V3n8B//NubRh4zydEZKUdKrSdtfV8+GP/8oruCo7Md8KLaXKdjMtaORceMmmzA4GR3zeM5G/BOf0gd/+k//adTPv43f/M3j3gw5zvas9MAUgg+xGL7R+sP3vVZ3B1W9XcCp7ww1Y8nNQhQns9DPHALgD0A1q4cDeTO5pBXKgNmunhFjghyEaDk1Qy+KHRmlVhNOeK8bFjZAlJBdQYMc30IIjOx+HiFw4m34+4f+HEU0dWA2IHM6+BJY8mRUX4+65eT3BqVzTRRNZyUnyND3DAyc9UChbhwOQcHIBr9N3iaIursRph14eUZioU+AkCb4y/Y1sZth7s1z1GyUs/+4UQg5KlWlwrjs7ZbLUQyrEbHqCOKZHgg8AMXKACIHFlC0ZqoF7DkcjElgoAG8m9wcM7g1LLnSkN5wKW6Q0JAG5kLRIdPYmrnVO14c0LfmXdrjwkRI2rtq91HdUiVwG/JyVewESVDZCmyFQ/hNoqTt34RNzz5NXjuE0rClBnn8cEvfxO7drcx9wv/ofb8KoRFsExdCL8Kq/WGo2E1APBdUlNEVgurVQRK6IKMeV69puOrRxvKUV4psnAVuWgQKNogR9pz5GjlyCRHvvyu0mAPiGo9MyaJQZ33QBWtbJIyTcxdEKcDwZdryRBpXhWJLFzDY9h4H2a0belN5CAkgBAR4pOLwGVPGBlX1G2ogo9QOToRnUDBC1w4deHYx+PuIlQg/XXf4vjLfR28sUiAoDP2eBN/ePMf4toj1+rbz9jzDOzq7Kodk8r2GnPtOTzYexBRfnoyMmxsekbIESsgekeqSDxNSlZrtGu6fv9471RURBgYmxVTKUujIYZLi9jxuMePfW6qSrkQX5ZpochWqU0X9yMcnnaA6AsIZ/4zCHHGKkcrR46A6Q6HQLycYCXKsTjMcPnuqZHjzzbOOFttZWWl9m9hYQH//M//jH/8x39Ed5VaGhZrw8x2tTPOsTjZaPCb9UtyJGeewC0vTOU5insJTPPK7nu+qme7Y2NiuYLz2iScsxyFlO6Pey5yAGlSXfxKaSAAClEpR67gjbRzgBOACl9Pli64JgSpqDKGmBfoiZSMIUdLwwxeMoli+HlAlGPpTe4CH9dfTZKjckEV9TTd0yDXvb183fwzbviaeCYXQXhwlaRB68vAoM/wnWf+Nm589u+CuiGSo+UYulo5KlWRmnLUqGrNCRDyVJ/TQhuqXbRbE/qcjgsdqVCbS4CH+oqwZm
N7oCly5AiAtarrJh3j7+glBUJVdZmEcIXKbc8RHxudiGNjQp+ldRUAPAYnAYQR7is08XYQqLAa2EjW08q3j+HA1btw4nvTeJ5zO46tVGNNjTYK/c4s2JiWGUqlIYKgKwKdrQYI9AdmWE1+VACe44AYC09TOdKlD2vkSPrFzKw8va/hNSWvMLLe1Ns0v9mcVe1ACDzN+blDKuXIuBZ101kSaiW0WbcLAJgkR74kehz1zQRLK0VPETIHXBPENEuNjU5L12lrkrsiZ1DK0aCVQ9Xgipcb14aE+k14oVSYHoHniHKK133pdXj1F169avgoNUpReBx44XXOmrO/FuJ6yKqbdUeOyaTBe4fMyF1LtlqztlEzrHb3Pd8HEdy4dlHrcbY4TKAu4O0T9WK2pmoEAP2sGs8XPvB7+Pg7/18sHjo4Nrye6/6YXIfVi1X6XOYqaiFSCFaeg6bnqLdwAv/mCwNk/b8Fy74PIQSWlhI89w++iRd/6DocXNz8ljFnrBxdddVVI/dxzvHWt74Vl1566boM6nzFtuldOO7eBZ8RZC4gigLEl5lRRYJ7OwFe9u0yJHH80gyIDM/RoMoCAwC/SOCLAgUJRpSjIkvx8Xf+Kmb37MOr3lWaXM0fLicER3wPmUGOBFWLAVCgMlt7oMZiqGqeOGA1AiWqsBoT8OT0n/stuESqNmMm7y/cfgRhGgKoxl+4jk6ZrwbMgRN3I1oIcPi6Oez+kT5m0z4Qrm33ocMfpKohkzfT9AsPQA4CAke1oGjwuX5UZt9RfxL3X/6z2HXirzCDqhfevm0yrGaQyajfLd/amYJgGYRUjhSZLAxPSieYxJKsYTMu+0OpAw4Bhm6nzHASGdJhAdQ3tTpNngBwOw6QuQAYVvon0Zmarn+utKgpR45Q2Wc54oXRBc5M5Z+h3dpjQsTgjg+RZSCyyawiCAQOAqkcCcFGwmrdW0tFYWX/JJ7wlOPw5m8H8PjyfBietWG7NZZA01yZ0IHECdFWYSAIDKPRzB0BwHOJimJJ5Uie90amXmXaFroIZNXI1YXju6XaKMrMHU+qc6bi47jVeEwUVCBQGZ/ER8s7gQxl7SQmTea5saD5TCUABFoNYmMWPJVkkbX3ARiMFMWkWaUWq5NAePn52oGLNM91Be3CC0BU0mrzfbJc3zs10Zbqw2gGlIIKq00M++j54VjCfjo82H0QJ5PSPH4iPoFLZi4ZOSZthPVmewDyCJjYMXJsE/28/txkTBPWVDCAANtlLbi1hLGaSlHzdu9YmeRxSOzBJUS25CkSIJhAXMT4mX96JVoX7EB69Bcx06mrxaYZGwCOG+Rw+dgRCMFx8CP/ATsubwG/9PWa8q6KfxLByu9PJEjj8VYIZijqojgEeLtq2dQAsDR/RG4schTxV+GhQD64BH2/fJ+DSxEev2MCm4lHVQRSv4jj4Nd//dfxoQ+tb52I8w2zE3sQt2T2B0lrrTkeEhlmVzrYvdLC7pUWtvnyByd/PEk0+uMMeHlBmp6j713zML74J99Ad/44Dt5+qy4ipyTfiUTgl7/KsPLtGeRJRZgEq8gRRVXDyIOpHClyVCpHFTliOu07LapLLg9CfQWSZgwFwD/eth8hre9+OGEQUaP4YO8QkA8wPN6BoA6i+fCMTNlV+4rKc9Tt1ScyUSjlqPT0ABgxhxSG/D+/59l4WPwIwJn+jnYqQ7ZBHoayqjWRhSU5eE05ojrDykE7mNIerRGzMgAqvyPPAbp++XpCZEiGo5MYo6o+jYAXOlq5Wu6Nmjh7SYFZrrJVAnhK+RAFkqXRIpOmb6LNZO0loVSFBNzxatc2VaqkIHCNIpBC1Ken+IJLcOsP/woGkxdiZf8EiOG9EEY/sdR3xvrSlMeGCILYacHz1LUlMDDCalUqv4DvOrpQKcgpPEc6EmkoR7qnoAMnUAcwcEPlUYUuCSGl5IdRcpEzBl+dc+LD96vviCojunE9uKpyMWkhkGXdOcSIoqPIUdK5qHxfQmptRnhWqcW6kr2oyF+W5Vo54kGrprDVxm94U3bP7pHhayBpZsNJqLDahLxuM7mBOBPcvVSF2ZfT8S108jjC/Ze9Fvdd9ioIAK0MEGsIfQFlAUOgUg7TRrVoIQRS+U3OnQk5SikeF96FZ17823hS59rRsNrScRy+bg5LBzvgrvytSLXrwe6D6OYn4E3eB0CMZBE3laPjvWoezSRROrLEgaM3jyjvVBKesl6V/P6S8f4sTitSRbIHAQBpo7H4Qq9eWoIXD8OH4T86VV+5s4R1IUcA8OCDD+pJzuKRYdvkHgzbslcaEl08DwDudhguPVox6QmvnDiUITvTi2jVe0k1oFwc5lqJuOXLB3Ho7gcAlJkzqrjfsBjikuMCH/hrhhffKtDZH4KeqH5MvKYcVX3TyrBa/XtnICgQaALlojJkU2akd4YdCKIUjEZavBB4YOEIfCrFTWkEFKCj/dW6Zb2WIisJAU2dM+pWTfVCXZGjA8e69fHI4RFtiR7N7lnq1dOcj7WfAeRDPUnNSZnb9BxFktQSR5IZcAQsrwzZmSr+52BhWGX3jasNpzq+Ow6w5G+Tg8xw+wOjISYlJAgCuC1Hm6y7/dEwWT8psFOsyHGEVXabyJGs1MMQQohaaMCj5STbUX3peKUcKVTKEcHRoZIfGESt5xxw2+SrsTL3b3DTM96F5fsmwPrVoieMdhSUpEii0blINYt1BBA7IVzZ5FYQgaEZRjUN2bVUfqFDSlUFe+U5cvRzFVGpyl8QOGFVVd5U/VTzWAEglerQiHLEBDz5mh5jgF8tsjQaNWQrckRIgJYnrxciIBq9AJk870VYKSWZUReLFyr0KirlCG610ckSqBA59ds6tNgcf2oswhfsuhSKaJn3m1DK0aQM6xWU1lrXrAV3L56eHGVDjiMXPg9HL3wh0tZ2dDIgTcaH+kxwwbXSrnxGTXJUFLFubLxd1gNaCzmKMooLZ7+GezsUs7PXjoTVyN334fZiD5IDAlTWG1Pp/ItJ+TmJQ/Em73PYkx2sPbepHM0PuwDK65TK6+doMl1e/o0SAepx4TB9LYyrASs4R0EqxYryExBCIBvUX687bFZaL+A4obLdjTein2WccVjt7W9/e+22EALHjx/Hl770JbzhDW9Yt4Gdj5jp7EDUogBCCD5AHiWlVUEI3E1cPO5EZRQMZBVc3VtNTcQkAIgDiAyByOE6ZT+oo90Ej9/WQZ4yCGb2LRqiNTGJlbSP113Lsd3gFDwylSNFYgQKlOZLgVLxaSpHDASUV4qPI6QhG0BeI0eT4C4vyUaDaRRMoEVW4KkChaQNIRII5GDNthUyYyOXd7PcqWWsLR05DNf3sW33npFzDgBUKWTEgyMcMAAr3RgL/RS7pmXdHbkbIoRUylHTMEwJ4Je/CcGOI0MAZAOk8rPPdgL92QrG4bsOUlVcU5EjwhHyqvEsNcJq19w7wL+Vt8Zl3StyRBwH8xMcPxgDQqS4+4Fj+Gn8YH2sumCngN8JNKHu9pdGjuslBbZxVWAyLBdotyQkUb++I8wo1+pK+QIFAA/TCUUUVGE1s+wBV
cRbAB+76RguLj8N0CBHFMakm3voPHRAj1EY7SgE66GbjIZGlMeGCIGh04YfqDEIJOP8E6TMVlOKDgCjCGTDkO1W/iVFVKgqaUAc+EFQlmYUDKyozo9ZAVnIbH6BUqklrmwVQzk6qpYkK8Acg1gq5cjwoDl5Vl6aTog2L3/rHAKs34czUW2wtBHeyBg6vLCMJ1wie+0VVehVKSREeHqjkyWxji1Tv1OrB2Ui18qPg8fteQoWyO0A6p4fE8pzNJFVhDdZOoGJXftGjj358AG0Jqcwtb3+fd+1VGWvrqTjCU8Re+CsfKw78wPYe2IJw9482mOPrhAXMbj8re3q7MKJ+AQS1qioP6iUke3xCuCvzXM0zCiioJy7CjcfMWh3FxZx377tmI0T/DCGCNDTytH+xeo9f9q5Hf8//kUg+ikdJmwqR4tR+T6pMc+nzMdS1sGOBpFT1yknHC7KsOg4csQHA6RhtU5xQiF4F9mwHiIz/awAAPn73eG5OMEYBumpe++dDZyxcnTrrbfW/t1xxx0AgD/8wz/Ehz/84fUe33mF2XAWw7ZqPz9AoaR+muFEbxt8Vn1dqp1AnDEwyg3DtKeL9AWiwCUybnusmyAZytL9rFoAU5nyvDDsYlskwA3fATNSaNVOmUDIRqmqhpGo6hyZ5MhI5XdQhdUKqsiRizycrNLIG7J/ShlaXg+u/syhPq7qZC5RxBACyHsyJJk6QFZOvHma4JP/9dfxqd/+jZG2DPpzKoJCfLhy9+5Rin/4XpWKqogQgQO9DjbkGyoJFE2+hXxwJVbEYdC4q+vezBkGSaUe6UaejppQBFq0Uo7M+lIJ83RuR1NVKYdYvk/iMgwulY0lRYZu9P3RY1VkjAiEU1VLlMGgkUZdcBRMYEKoCuGh9rRA5EgaKbpNc3752aYwIUOXgpdhNWH0V1OhJQcOjsVqkWfgvP4ZZ7LDul5Of+pikEjVNqpMqOV79NAb0+VbVcF2BBC5LfhhdU2lRuaNbhGIMlvNzN5THed1tp8qRaFDYlVNL6brPTlwW1WJAm6oPLocARFgTkVYTWUtZ1x1H4EnCmTEgVJfimRUOYJWjkJM0AX5mhysUZdGk0Xi69c7dKRaYFXotk55XP35yixPGVYLJ/Q5aPi6kUsSRODi4rnLoJad1SpfR73yGmwVFK58r5W//4vR47or+MS73oZ/+B+/XX8/ltfadiylo8opAOSph3zwKeSDT2F5W+mXjZZPjD3WhAqp+Y6P6bD05420GxqW59ERAjP9UrFpKkfjKsxHGcViWH5/sUdHlKNckvjU8zDkcrMg33shLj/nJUtPxXUL78e/DN4I8bm36gtaKUeCldfistwUNsOWR5KZEeVIk3hHQH9/dHQOKroryLy65sLpEeSNFkqVz1P+BqWn8aKJcmznZFjtm9/8Zu3fN77xDVx55ZV485vfDM87YyHKwsBMOIO4rQqyDZFrcpRg6kTdJKuKJkYZRRZTCNkywIWjs0FmHIoLZ8t90NGVRHdRF7xSjhQ5Woz76G1/JW74d+/HYFJ6ENJRcuSUehHUpUMEQyJ3N9r8CQcFqmw1z6hz5GQVOSpq5IjXmmemBUPoDuAIlW5dqQYjcjxNMUxcCKrCGQ5E3AUADJYWUWQpkn4PS0cOYRy4lnBdvfC3GMdCv1qgBK+UI1dlJjXruTACVhwEy24FAOSIkcfVgjTT9keqZFNttK1erJVXoUqzMjkz+tWNK8eilKOloEB3qjqXQZKOHMt1lXKO9sykvmbStD6Bq0rrKkQLJ4QjvWwCOfK8PkGO6yROnBl0UkloxWhYjRt+tpO13Wj9tQdsAVn3I2D5fixt/yG4MiScaLKspjOKeAwRVuUoHCFAiYdWWBGoNB49RwJChtWqlH+tHOm4pHznccqRDkU7uqGuELSWaVhTjuRrcIiaJyunXH/hHssREU+HfwtJMMzfjq5XRUJMOF35mhys8bsp9LVVvd7Jw9VvRGhyJHT1bsCrPEdGMVYWTGj1bKQUgSSeBA4umroIqsr7iHqgjpfhP59x+PJzLf/zt0aOWz52BJwxLB8/Wgu73bd8H6iR/bq8SluQLHEBUSYtrMyU6f5x9/QtRJQZeyqYQscrNzXNVhzZsCRZoRCYSsrzbpKjv77hAJ787qvxLw/U328xWULfK8/f0OUj4SWVNZa7LrpqLpXkSIUPn3TiOeVtehHI/quBuz4LwGgdkpWhQJWtli7X/T9H4umR+kmqmXDgQX9/GRulD72Fo8hlKNeT4VpOjyJt+B5V+yNVVJLIzf7esJznz0lyZLFxmAqmkIQyE4oPqsamRYowrWceOEnlOUqGuZYlHUG0CrDTTXCBzJA62k0Qn+xDiAKCVwt2GpULy3Lcx9Lul4J6Hdz89P8CAGCGIVuFvUiDEbjjlCMhG89KeGCaHJGi8i7Q1hSEV5EjszJsVnAE7kBzBgroXUbcyCzKsz5+N6gKBBIQsOXS/5MYC8L8g/djHFSla5dxuHLRCxmrL/SKHDkErloIR5QjgSL6avUUUSA3zOMt30Hbl/F62R5EpVhzp/pMIa0y2qg29Zahyko5Gv0cqrnnyTAGq2ryIUhHd6gclUoxvX0OOkuv0bOtlxQIUOjWIoSEIKrPlyhARd0wP84r4KKNUBEqnoyE1bQpWRCcTI2QU2N6ikQfAAenh7E090Pw8qT0OHVVyK9VkbwxxfE4U8ZvAIQgbFXyf2GSQrNCtusYxKcyYqswsyKrxDPIkSTpzKii7Rk948xsHq7Dm4BQJIxw3VAWkKqQoRwNEEApPaoFR0HHXBAIMOX15KgYeEM5oqptC3Ghvv+V40erA3ilHBFDOdJhNSMcQ9ptOMY5MJHr4xxMB9Oa82ar9FNUCRIuFwikGhqPKTY6VP3XhEDcr35nphkbAJYHRzAORV59r2kwiSyYRtIb708yoZSj6WAaba+cX5ueozQu55+WEJiQ59EMq335zuMomMCth+pK7eHoIf133yUYZnVlVpW9YK6DYSx/e5J09ZJlvP2qSVzULWtHxbncUB+/DUBFjli+S36O8nlZt1QX1Xd8NJ6BaBiylY8s8ByoLzAfQ476i0dRSAVxYlqWo+GDEYVZl7mQirnyDO6UAkv/XAmr/cRP/ARuvPHG0x43GAzw/ve/H3/6p3/6qAd2PsIhDryWnCxFglylStIEXlGSDdVtHZJZcwH0e5n2XDiiyjyac3q6ts58L0V0ogvBGmETqRwtp1WoShAXg8kLkRTjw2pytPq/zVR+SpyyYq4Oq3FkUilxC08/n/odCFc9l+sdKVAqR64bQfq1SxO4qo/S+KHdOjgIOqyrlmyplJjjgUmO9mMs5Pt6nMPR5IjXmz7KMJZDHHg6hFJfBBKRAaKaxDly0Lh8/7ZfVgNX5EgpR0rBYW4faoEKKRDLEBA1zMqFCLXJs7kUCs61uX2pk0AIolU3v2gQFiH0oswIw7adu3R9p3w4qhxdRo4iU14xEsCBnNhEDurWHRrjlKOAuaV3B8pz5EEYPeOUQZkIIDFskM3PWEBtHBIMpi+Gw/zSo9FVrQ1CCKd8fspG
oEDPc6AkqDwrVk6XvuuQfveMc79O9LS0vYuXPzmp6eM9AGj6bKh42ZqO19OPfO+svujKIpczQavXSschRdcTku+cqXQc7Rpp6U5vqioEUyMueIW+FXv7WZeSumPXK1UbJ1hK0ciUIfVWkIyxhs5YgQH3RECFmF08gaJe5t2Zx3ZvfescecCeWoHbTRDRPc+qX3AhDwo1eMPM5TPecIhxefHaFcB4eN4KxfpX7iJ34Cr3nNa9Y8Zs+ePSMfv+WWWwAA3/3udzE1NYUtW7bgS1/6UuWY+fl5pGk6pCjZCIJgbCf5Cxtyx215GA2aQBKqwmpmsRnlGuxgMNuewrO+fQhxMp4cAThniREgPYykXRDLU9BR1WpWiDaY2MSWJ0o5EsRSjgbIkVZAAa7soUHArTGW34ftjeSDjVCHVDhtLeXoqhe8CFv3X4LpXXvGHuNLr4PTqRwRQpDHPghKg0cejQ6rhX4sj4/AYzd3Opx7OOtXqenpaUyPCS2cCF/72tcAAFu3lruwW2+9Fffeey8OHz6sH/vUpz6FIAhw/TqTWh0MiBWOUGCDO2WdzmGRI9eIck1Q30ezV+bK8ZmTu/bPdjAGTY6oSEBHlHvzyCJHUxObNpZqeFg+NphzRG3lyJAjf6BUnwwpRyMSsmsloaDx+BJ3Shnm9u1fc9w6rDai+eumohkDkhx5/miCV48mwOMXgrKpc6Y61cHBxllPjtaLL3zhC/jiF7+IO++8E61WC1/5ylfw9re/HS972cuwa9cuAMALX/hCHDx4EK9//evxH//jf8Tx48fxrne9C295y1tcpdrJgNjKUQk+mJCtjildIAEQUOrI0VogVk7WWsrRuQzGiVpfQfnocDezqrmC2U3sB0dUyw+jHA2ORvsXEQFPh9XoiPw5WzkKRpKj5vd9H3r/+A1MvPpVz2jYOqzmn977yWu1AMwDANiY1iB+QMCDK8rjGy4h2+Hcw3lDjoIgwMc//nG8973vRb/fx+7du/GWt7wFP/MzP6OPYYzhz//8z/HWt74Vt99+O6Iowute9zr8yq/8yhkc+bkLolUhsyAMVefocIRZUBzWht2h/nwlR9yngHQjoP7ofD+uqrqKDP6mhtXKf0RF9xlQjtiosBrg8eoUSuzrm3jgI/KK/B07sONDv/4MB22Uo/ppVo6Cme0AHgagKvRGHBNxxZ/AzxFHfAcHG+cNObruuuvwxS9+8YTH7dq1C3/2Z392GkZ0/oMYdqQxWJ1j/GFMEqvD2iBW3ta4arVzHdxSO/iYXm/1bVtB8yOIu0dA1ghBPVMQK3dO/Txcym+FjjU5ouBrhIgJCeBvYhl7oJWj0zuNx9NlSkJGx+e9BbEckyjAXG81h3MQbhvvcPIYEQqhg02whsiRu+ROBBVWI54H2tpExeQMwjYC5WMMHqOdO3Hrl9+LGx/73c01OVTqJiGa6A9Wq1GrlJ+pa5kQ+AMhLVIh/z78Eb3HThUma+W9Nts8veTjol1XAwCyNaJ5Qb0cGy1S0HO4cMDhwsV5oxw5nH7YzTgV6IDMnkdMHlPI57iJ8kRQYTU2M33anI9PN3wrn8irjQ678JkZXPSBX9z80KJVrTbQDNAcwqh+lGtVicAbsqWoJmT7tc1zh/6RZ+3DjokYL7lqK5Cfvv6Q+/ZciwcBBCPcsRXCRgigsFQ2B4dzC44cOZw0TFjN6mQ+0IVcBIocZeqI0zG0cxqKHJ2v+UYA4NfNwhqsoa40XvCCTR+LKRooG4kIwLZyLI+xlSOZkE1A4A2F1aql/JsZVpuo+XjtTWWxCVZPHzny9+3D5A//MPy9e8YeE7YiAKugIht7jIPD2QxHjhxOGirfwKJGQxK6+T1TR5yWsZ3LiK67Dt6uXWi95KVneiibhsDKQwlb4xWI0wFiK0dCdVkbCKsxQ4KYMO1DvIHGtoOl/H50/lVmEkIw929/Zs1jtt92GRp/8j+wZTJd8zgHh7MVjhw5nDx0CE0tJMPER0fRlLzuwmonhL9jB/Z/6i/P9DA2FY1GA+q6CTfV/Xod0DlH0OSIiIFDdNWlsMgRgc/WuJ6JDz+6MKfYYLKJN3z0mVkVODicSbiVyuGkYZQjk4MxCMrUY045cjAIrZwjv3lmu7bTSv8/qRyRwZwjowBRq/LS49VrngyG1cZU4jk4OJzdcCuVw0nDhMzGl+nTNcIODhcuPMv92p84w2E1TfKJLuH3B2ZGyrUECmonZA9d3/bPXunn5ODgcM7B3bkOJ431KEeDPiiD4QqHCxN2+b7fPrPkCFbfNCFDZnTgUlau7gICrJChtxOE1TjWbmjt4OBw9sJpvg4nDTKgHI0KmXlxNSGTFI4dOQC1doCpY9+Al6yAt19xRsdCLeVIhdX4ANGnlbDaWsqRed4Y428HB4dzAI4cOZw0jPdLMfaYYKJaykvHdPF2uLDAggBXf+P/Kn9uveGMjkXlE5UJ2VI5GuD5TJfsCzDLusJjgzlHBh51GwEHh3MVLqzmcNIYyjkaUYkWz9QrRdFsE9tAOJw7sPvHscaZDatRy+BRXct8MKzGDDmqJmQPXvO2cuTIkYPDuQpHjhxOGlo5UmX6I9YCLwiwGpkFgzInVjpA90ojngd6lpAjAFo5YoPkyBvOORpVym9RI/jcuUM7OJyrcCuVw0nD7KbLRWBUaxBOOZ6cXMX+x8vFMEvcbtoB4BMTmPvZnwVtNipl8mcClFthNaUc0cGcI6N0Ue2QPcIEUj2N+PD4+HCzg4PD2Q1HjhxOGoPkaBRyQvB0u6vJUdJz5MihxOQbXn+mhwDA9AMUINqs1BsgR9wq5ef6EiZgdHTOESE+fEeOHBzOWbiwmsNJg1pJqsBoD6PH0yUcbZm+T7O7zs8u8w7nLphWjggU0eeDida6Z6DQXkijrCuYeoz48IKhPzs4OJwjcOTI4aRBPW/gkeHF4prGXsw3DDlivLvJo3Jw2BiUclQQQEApR9VQH/OUulStVhuEJ0PLhPgIAlfL7+BwrsKRI4eTBh3oiTVKObpx+kr81pGn9O9HHn5w08fl4LARMEnyBWDCaryaccCsPoKqQn/U9U5V3h2J4LnWIQ4O5ywcOXI4adBatSx/VEI22fMs3Hjru8Dl4uJZPbUcHM4G8ECSIwKosJrHq6oo91WMTOiE7FFo8mmw8Gbw6BYEtUFl1cHB4VyB29o4nDS2NbfjIev3kU1lmQfc8TN47e4H8Vf/5TfxrNecHUm4Dg4KzDP5REo54gPkiFrHUCshexCcMnj+zQAAv3bs1A/WwcHhtMCRI4eThuf51QfW6CM1u2cfXvPeX97kETk4bByeVIVKh2xFjqrZ1N46E7KJZfzo153hqYPDuQoXVnM4aXhRVPl9VA6Gg8PZDi5JfpmMXZIbNlBswD3b50hVZw7DruwPWq5VjoPDuQpHjhxOGlPbd8K+hBw5cjgXwQNJjoTx6+K8qooybpOj8a9lK0fBRPMUjdDBweF0w5Ejh5PGloumwLxp88AaYTUHh7MV3FdEKNWPDdpUVJUj9dOIajVrRg0mJk7RCB0cHE43HDlyOGkwRrFl7z7rEUeOH
M49+L6qoDRVaCys2lTYShId1URQwlaOwunJUzNABweH0w5HjhyeEQ48+3r9cwpXpu9w7sELBq9bCtYYIEdW8QFZK+dIdxkp4DXPbENdBweHk4cjRw7PCNsuuVT/HHWPnsGRODicHPxgsLCAgkbVSjNbOVqrWk0lZPO8B0Ld9OrgcK7C3b0OzwhTO3aZX7LjZ24gDg4nCT+MBh6hoPEAOfJNztFawWMi2RHPXZscB4dzGY4cOTwjMKvNwkrkr3Gkg8PZCd+vehoRENC4SpjsarW1wmrKJJ4X/VM6RgcHh9MLR44cnjGoCx84nMMIwgFyJEYoR54PTYfWCqvJfrW86J3iUTo4OJxOuFXN4Rnj1e/9ZUSNJu5+69vP9FAcHDYMPlC2T0aF1ZiHQTI0OiG7fNQTTjlycDiX4dqHODxjbLvkMvzYR34fxPkcOZyDUI1nFQgISDQqrDaoHA2j4XXKf7Mjp3SMDg4OpxdOOXI4JXDEyOFcBfdY5XciCGhcbf1BGIOZLstrfdQlv621jFu+9B5c1vmrTRipg4PD6YIjRw4ODhc0uM9hB8lGJWSX5EgpR+bIQZBd1yPuPg2+8/JNGauDg8PpgQurOTg4XNCgjKDcJ5a91YjAUM4RZbSUiqyI2iittPGiu9H9xj+h/cpXbtp4HRwcNh+OHDk4OFzQoIzCJkcUBHQg56gMG0uhXRKkUeSIT01h2y+9f7OG6uDgcJrgwmoODg4XNBgjxqAIpXJEwmpLkfLPig6V7Ii6PDsHh/MWjhw5ODhc0DBhtRIEGGr9UTpfD5byO3Lk4HC+wpEjBweHCxol8bHJ0YhEa1I9pnxskwfm4OBwxuDIkYODwwWNQeIzjvRouwqdc+TYkYPD+QpHjhwcHC542D5dJ1aExvdWc3BwOD/gyJGDg4NDRTkaR3vKx4UiRy6u5uBw3sKRIwcHBwesRzlSfyhL/pmbPh0czlu4u9vBwcHBIkeqeey4Y4QiR4SNOc7BweFchyNHDg4OFzxIhRyNmxar5IhSR44cHM5XOHLk4ODgYMXSSt+jkQfJf8ucI0bc9OngcL7C3d0ODg4O1lRI+ehpcbB035EjB4fzF+7udnBwuOBh0x7Kx4XLHDlycLhQ4O5uBwcHByusxrxx/bir5Gh8bpKDg8O5Dnd3Ozg4OFjEh/tjyNFAKpLnbI4cHM5bOHLk4OBwwcM2dOSBP+6oym/e2JJ/BweHcx2OHDk4ODhY8MJg5OODCdnemMRtBweHcx/nzN1977334rbbbkMcx2i32yOPefTRR/H93//9qNVqmJ6extve9jYkSVI55hvf+AbuuOMORFGE7du3433vex+EEKfhEzg4OJytsJUjLwzX9RyfnTPTp4ODwwYxLvPwrEOSJHjlK1+JW2+9FR/96EeH/p7nOV7ykpdgZmYGn//853Hs2DG88Y1vhBACH/rQhwAAS0tLuOuuu3DnnXfiK1/5Cr7zne/gTW96E2q1Gt75znee7o/k4OBwlsBWhfxaPPYoG045cnA4f3HOkKP3vve9AICPfexjI//+qU99Cvfddx8OHTqEbdu2AQA+8IEP4E1vehPuvfdeNJtN/P7v/z56vR4+9rGPIQgCXHHFFfjOd76DD37wg3jHO97hGkk6OFyosG59v95YxxMY/HhcbpKDg8O5jvNm6/OFL3wBV1xxhSZGAPCiF70I/X4fX/3qV/Uxd9xxB4IgqBzzxBNP4OGHHx772v1+H0tLS5X/HBwczh/YG6O42Rp9jM2giAevNjo3ycHB4dzHeUOOnnzySczNzVUem5iYgO/7ePLJJ8ceo35Xx4zCL/3SL6HVaun/du7ceYpH7+DgcCZhi8bBmJzGKjx4tWizhuPg4HCGcUbJ0Xve8x4QQtb873/9r/+17tcbFRYTQlQeHzxGJWOvFVK75557sLi4qP87dOjQusfk4OBwDsC6/2vtqXEHWYd78Ju1TR6Ug4PDmcIZzTn6iZ/4CbzmNa9Z85g9e/as67W2bNmCL33pS5XH5ufnkaapVoe2bNkypBAdOXIEAIYUJRtBEFRCcQ4ODucXiOVZFEyMIUf2/olweK315CY5ODicizij5Gh6ehrT09On5LVuvfVW3HvvvTh8+DC2bt0KoEzSDoIA119/vT7mZ3/2Z5EkCXzf18ds27Zt3STMwcHh/AO1S/nHJmRXc478idG5SQ4ODuc+zpmco0cffRRf//rX8eijjyLPc3z961/H17/+daysrAAAXvjCF+LgwYN4/etfj6997Wv4zGc+g3e96114y1vegmazCQB43etehyAI8KY3vQnf/OY38Sd/8id4//vf7yrVHBwucNg9ZL1otM9RRTgChz85ubmDcnBwOGM4Z0r5//2///f4r//1v+rfr732WgDAZz/7WTz3uc8FYwx//ud/jre+9a24/fbbEUURXve61+FXfuVX9HNarRY+/elP48d//Mdxww03YGJiAu94xzvwjne847R/HgcHh7MHFRPI9bQPIR68qXG5SQ4ODuc6zhly9LGPfWysx5HCrl278Gd/9mdrHnPllVfib//2b0/hyBwcHM51UGqkI+55ow+qSEce/OmZzR2Ug4PDGcM5E1ZzcHBw2CwQT02FdGyI3fY5ooKCButrM+Lg4HDuwZEjBwcHh6byLGLrOpy4qdPB4byGu8MdHBwueFDVJ42sb0p05MjB4fyGu8MdHBwueEyGsnyfrKEcWdE2Clfd6uBwPsORIwcHhwsecX0WAEDXUI4qOUfO+sPB4byGI0cODg4XPFijLMsnbH1O+E45cnA4v+HIkYODwwUPVb5P6BruJnZYzXEjB4fzGo4cOTg4XPCgvMw1omvkHNl8iDt25OBwXsORIwcHhwseB591HbxoC/Ze96zxB1l8iDFHjhwczmecMw7ZDg4ODpuFyW0zeNvHfmvNY2xzSM4dOXJwOJ/hlCMHBweHDcLj6zOLdHBwODfhyJGDg4PDOmArR37gRHcHh/MZjhw5ODg4bBBB6MiRg8P5DEeOHBwcHNYDK80oiNbnh+Tg4HBuwpEjBwcHh3XATsGOa9HY4xwcHM59OHLk4ODgsA6YnCOKsBGf0bE4ODhsLhw5cnBwcFgHNDciHvyGU44cHM5nOHLk4ODgsA6YYjUO7nKOHBzOazhy5ODg4LAeSHZEiAcv9M/wYBwcHDYTjhw5ODg4rAN2WI2FTjlycDif4ciRg4ODwzpgErI9eGF4Rsfi4OCwuXDkyMHBwWEdoF5p/EhoBObIkYPDeQ1n8+rg4OCwDrBd28CXZ0G9vfAiR44cHM5nOHLk4ODgsA5Q3wMPrwEAsNj5HDk4nM9wYTUHBweHdWCLX9M/e44cOTic13DkyMHBwWEdmPIMIWJxbY0jHRwcznU4cuTg4OCwDhBquqtx3zuDI3FwcNhsOHLk4ODgsA6oUn5GkgpRcnBwOP/gyJGDg4PDOkBZSYg4Sc7wSBwcHDYbjhw5ODg4rANKOeI0O8MjcXBw2Gw4cuTg4OCwDqhQGqf5GR6Jg4PDZsORIwcHB4d1gNByuuTMKUcODuc7HDlycHBwWAcILz1z
ORNneCQODg6bDUeOHBwcHNYBuuVyAACf3HqGR+Lg4LDZcOTIwcHBYR3YetlWtOdi7L/94jM9FAcHh02G663m4ODgsA40pyP88/fecqaH4eDgcBrglCMHBwcHBwcHBwuOHDk4ODg4ODg4WHDkyMHBwcHBwcHBgiNHDg4ODg4ODg4WHDlycHBwcHBwcLDgyJGDg4ODg4ODgwVHjhwcHBwcHBwcLDhy5ODg4ODg4OBgwZEjBwcHBwcHBwcLjhw5ODg4ODg4OFhw5MjBwcHBwcHBwYIjRw4ODg4ODg4OFhw5cnBwcHBwcHCw4MiRg4ODg4ODg4MFfqYHcC5CCAEAWFpaOsMjcXA4C7G6an5eWgLy/MyN5UKBO+cODuuCWrfVOj4OjhydBJaXlwEAO3fuPMMjcXA4y7Ft25kewYUHd84dHE6I5eVltFqtsX8n4kT0yWEIRVHgiSeeQKPRACHkTA/nrMLS0hJ27tyJQ4cOodlsnunhnJdw5/j0wJ3nzYc7x6cH7jwbCCGwvLyMbdu2gdLxmUVOOToJUEqxY8eOMz2MsxrNZvOCvwk3G+4cnx6487z5cOf49MCd5xJrKUYKLiHbwcHBwcHBwcGCI0cODg4ODg4ODhYcOXI4pQiCAL/wC7+AIAjO9FDOW7hzfHrgzvPmw53j0wN3njcOl5Dt4ODg4ODg4GDBKUcODg4ODg4ODhYcOXJwcHBwcHBwsODIkYODg4ODg4ODBUeOHBwcHBwcHBwsOHLksCY+/OEPY+/evQjDENdffz0+97nPjT32TW96EwghQ/9dfvnl+piPfexjI4/p9Xqn4+OctdjIeQaA3//938fVV1+NOI6xdetW/PAP/zCOHTtWOeYTn/gEDh48iCAIcPDgQfzJn/zJZn6Esx6n+hy7a3k0Nnqe//N//s+47LLLEEURLr30UvzO7/zO0DHuWq7iVJ9jdy2PgHBwGIM//MM/FJ7niY985CPivvvuEz/1Uz8larWaeOSRR0Yev7CwIA4fPqz/O3TokJicnBS/8Au/oI/5L//lv4hms1k57vDhw6fpE52d2Oh5/tznPicopeLXfu3XxIMPPig+97nPicsvv1y84hWv0Mf8/d//vWCMife///3i/vvvF+9///sF51x88YtfPF0f66zCZpxjdy0PY6Pn+cMf/rBoNBriD//wD8X3vvc98d/+238T9XpdfPKTn9THuGu5is04x+5aHoYjRw5jcdNNN4kf/dEfrTx24P9r776Dojq/PoB/l2WXXZqICIIrIFHBgkIsoWhkgoIdK2rAXsZMjJIJWGJjdDSgEbtREooaBkaMMpogEUusCSqgKKuCxjJIMVhRlHreP3y5v70uIBCI7Xxmdsb7tPvcs4969u5zwcGBFixYUKf++/btI4lEQrdu3RLKoqKiqFmzZo05zXdefeO8Zs0asrOzE5Vt3LiRVCqVcOzr60sDBgwQtfH29qZx48Y10qzfLU0RY17L2uobZ1dXVwoMDBSVzZ07l9zd3YVjXstiTRFjXsva+Gs1Vq3S0lKkpqbCy8tLVO7l5YUzZ87UaYyIiAj069cPNjY2ovKnT5/CxsYGKpUKQ4YMQXp6eqPN+13TkDi7ubkhJycHiYmJICIUFBRgz549GDx4sNDmzz//1BrT29u7zu/d+6SpYgzwWtbUkDiXlJRAoVCIypRKJc6ePYuysjIAvJY1NVWMAV7Lr+LkiFWrsLAQFRUVsLCwEJVbWFggPz//tf3z8vJw8OBBTJ8+XVTu4OCA6Oho7N+/H7GxsVAoFHB3d0d2dnajzv9d0ZA4u7m5ISYmBmPHjoVcLkerVq1gYmKCTZs2CW3y8/Mb/N69b5oqxryWxRoSZ29vb/z0009ITU0FEeH8+fOIjIxEWVkZCgsLAfBa1tRUMea1rI2TI1YriUQiOiYirbLqREdHw8TEBMOHDxeVu7i4wN/fH926dUOfPn2we/dudOjQQfSfzoeoPnFWq9WYM2cOli5ditTUVCQlJeHmzZuYNWtWg8f8EDR2jHktV68+cV6yZAkGDhwIFxcXyGQy+Pj4YPLkyQAAqVTaoDE/BI0dY17L2jg5YtUyMzODVCrV+jRy7949rU8tryIiREZGYsKECZDL5bW21dHRQc+ePT/YTygNifN3330Hd3d3BAUFoWvXrvD29sbWrVsRGRmJvLw8AECrVq0a9N69j5oqxq/itVz/OCuVSkRGRqK4uBi3bt3CnTt3YGtrCyMjI5iZmQHgtaypqWL8qg99LQOcHLEayOVydO/eHcnJyaLy5ORkuLm51dr3+PHjuH79OqZNm/ba8xARLly4AEtLy38133dVQ+JcXFwMHR3xX92qT4D0/78q0dXVVWvMQ4cOvfa9ex81VYxfxWu54f9myGQyqFQqSKVSxMXFYciQIUL8eS3/T1PF+FUf+loGwI/ys5pVPTIaERFBarWaAgICyMDAQHj6bMGCBTRhwgStfv7+/vTJJ59UO2ZwcDAlJSXRjRs3KD09naZMmUK6urqUkpLSpNfyNqtvnKOiokhXV5e2bt1KN27coFOnTlGPHj2oV69eQpvTp0+TVCqlkJAQunLlCoWEhPDjz40cY17L2uob52vXrtGuXbsoKyuLUlJSaOzYsWRqako3b94U2vBaFmuKGPNa1sbJEavVli1byMbGhuRyOX388cd0/PhxoW7SpEnUt29fUftHjx6RUqmk8PDwascLCAgga2trksvl1LJlS/Ly8qIzZ8405SW8E+ob540bN1KnTp1IqVSSpaUl+fn5UU5OjqhNfHw82dvbk0wmIwcHB/rll1/+i0t5azV2jHktV68+cVar1eTk5ERKpZKMjY3Jx8eHrl69qjUmr2Wxxo4xr2VtEqIa7hEzxhhjjH2AeM8RY4wxxpgGTo4YY4wxxjRwcsQYY4wxpoGTI8YYY4wxDZwcMcYYY4xp4OSIMcYYY0wDJ0eMMcYYYxo4OWKMMcYaIDw8HB4eHjA2NoZEIsGjR4/q1O/u3bvw9/dHixYtoK+vDycnJ6SmpgIAysrKMH/+fDg6OsLAwABWVlaYOHEicnNzRWN4eHhAIpGIXuPGjRO1SUtLQ//+/WFiYoIWLVpg5syZePr0qVB///59DBgwAFZWVtDT00ObNm0we/ZsPHnyRGhz69YtrfNIJBIkJSWJzhUTE4Nu3bpBX18flpaWmDJlCu7fvy/UZ2ZmYtSoUbC1tYVEIsH69eu14lJeXo7Fixejbdu2UCqVsLOzw/Lly1FZWVmnuFbZvXs3nJycoK+vDxsbG6xZs6Ze/QFOjhhj74Hg4GA4OTm9sfMvWbIEM2fOrFPbwMBAzJkzp4lnxBqLh4cHoqOjq60rLi7GgAED8O2339Z5vIcPH8Ld3R0ymQwHDx6EWq3G2rVrYWJiIoyZlpaGJUuWIC0tDXv37kVWVhaGDRumNdaMGTOQl5cnvLZv3y7U5ebmol+/fmjXrh1SUlKQlJSEzMxMTJ48WWijo6MDHx8f7N+/H1lZWYiOjsbhw4cxa9YsrXMdPnxYdK7PPvtMqDt16hQmTpyIadOmITMzE/Hx8Th37hy
mT58uipWdnR1CQkLQqlWramMTGhqKbdu2YfPmzbhy5QpWr16NNWvWYNOmTXWO78GDB+Hn54dZs2bh8uXL2Lp1K8LCwrB58+Y6jwGAf7caY+ztBqDW16RJk6ioqIgKCwvfyPzy8/PJyMhI9LuqalNQUECGhob0999/N+3EWKPo27cvRUVF1drm2LFjBIAePnz42vHmz59PvXv3rtcczp49SwDo9u3bonnNnTu3xj7bt28nc3NzqqioEMrS09MJAGVnZ9fYb8OGDaRSqYTjmzdvEgBKT0+vsc+aNWvIzs5OVLZx40bROJpsbGxo3bp1WuWDBw+mqVOnispGjhxJ/v7+wnFJSQkFBQWRlZUV6evrU69evejYsWNC/fjx42n06NGiMdatW0cqlYoqKytrvIZX8Z0jxthbTfPT6vr162FsbCwq27BhAwwNDdGiRYs3Mr+IiAi4urrC1ta2Tu3Nzc3h5eWFbdu2Ne3E2Ftp//796NGjB8aMGQNzc3M4Ozvjxx9/rLXP48ePIZFIhLtLVWJiYmBmZobOnTsjMDAQRUVFQl1JSQnkcjl0dP7337xSqQTw8k5PdXJzc7F371707dtXq27YsGEwNzeHu7s79uzZI6pzc3NDTk4OEhMTQUQoKCjAnj17MHjw4Fqv61W9e/fGkSNHkJWVBQC4ePEiTp06hUGDBgltpkyZgtOnTyMuLg4ZGRkYM2YMBgwYgOzsbOG6FQqFaFylUomcnBzcvn277pOpcxrFGGNvWFRUFDVr1kyrfNmyZdStWzfheNKkSeTj40MrV64kc3NzatasGQUHB1NZWRkFBgZS8+bNqXXr1hQRESEaJycnh3x9fcnExIRMTU1p2LBhr70j5OjoSJs3bxaVxcfHU5cuXUihUJCpqSl5enrS06dPhfro6Ghq06ZNva+f/fca+86Rnp4e6enp0cKFCyktLY22bdtGCoWCduzYUW3758+fU/fu3cnPz09UHh4eTsnJyXTp0iWKjY0lW1tb6tevn1B/+fJl0tXVpdWrV1NJSQk9ePCARo4cSQBo1apVorHGjRtHSqWSANDQoUPp+fPnQt0///xDYWFhlJKSQufOnaMlS5aQjo4O7dq1SzRGfHw8GRoakq6uLgGgYcOGUWlpabXXVNOdo8rKSlqwYAFJJBLS1dUliUQimuv169dJIpHQ3bt3Rf08PT1p4cKFRPTyjpm+vj4dPnyYKioq6Nq1a+Tg4EAA6vXLdDk5Yoy9M+qTHBkZGdGXX35JV69epYiICAJA3t7etHLlSsrKyqIVK1aQTCajO3fuEBHRs2fPqH379jR16lTKyMggtVpNn3/+Odnb21NJSUm183nw4AFJJBL666+/hLLc3FzS1dWlsLAwunnzJmVkZNCWLVuoqKhIaKNWqwkA3bp1q3ECwxrNypUrycDAQHjp6OiQnp6eqOzEiROiPvVJjmQyGbm6uorKvvrqK3JxcdFqW1paSj4+PuTs7EyPHz+uddzz588TAEpNTRXKYmJiyMLCgqRSKcnlcgoMDCQLCwsKDQ0V9c3Ly6MrV65QQkICderUib744otazzV79mxydHQUjjMzM8nS0pJWr15NFy9epKSkJHJ0dNT6iqxKTclRbGwsqVQqio2NpYyMDNq5cyeZmppSdHQ0ERHt3r2bAIjeCwMDA9LV1SVfX18ieplgzZs3jxQKBUmlUmrevDkFBwcTAEpJSan1ujRxcsQYe2fUJzmysbER7bewt7enPn36CMfl5eVkYGBAsbGxREQUERFB9vb2on0JJSUlpFQq6ffff692PlV7OKoSLCKi1NTU1yY+jx8/JgD0xx9/vPaa2X/r/v37lJ2dLbx69epFoaGhorLi4mJRn/okR9bW1jRt2jRR2datW8nKykpUVlpaSsOHD6euXbvWaT9dZWUlyWQyiouL06rLz8+noqIievr0Keno6NDu3btrHOfkyZMEgHJzc2ts8/PPP5NCoRCO/f39tfb51DZOTcmRSqXSugu7YsUKsre3JyKiuLg4kkqldPXqVdH7kZ2dTXl5eaJ+5eXllJOTQyUlJZSYmEgAqKCgoMZrepVuvb4QZIyxd0Tnzp1F+y0sLCzQpUsX4VgqlaJFixa4d+8eACA1NRXXr1+HkZGRaJwXL17gxo0b1Z7j+fPnACDa49CtWzd4enrC0dER3t7e8PLywujRo9G8eXOhTdXej+Li4n95layxmZqawtTUVDhWKpUwNzdHu3btGmV8d3d3XLt2TVSWlZUFGxsb4bisrAy+vr7Izs7GsWPH6rSfLjMzE2VlZbC0tNSqs7CwAABERkZCoVCgf//+NY5DRABe7t2pSXp6uug8xcXF0NUVpxNSqVQ0Xl0UFxeL/s5WjVP1KL+zszMqKipw79499OnTp9axpFIpWrduDQCIjY2Fq6srzM3N6zwXTo4YY+8lmUwmOpZIJNWWVf3DW1lZie7duyMmJkZrrJYtW1Z7DjMzMwAvH8+uaiOVSpGcnIwzZ87g0KFD2LRpExYtWoSUlBS0bdsWAPDgwYNax2Xvhvz8fOTn5+P69esAgEuXLsHIyAjW1tZCguXp6YkRI0Zg9uzZAICvv/4abm5uWLVqFXx9fXH27FmEh4cjPDwcwMuf9TN69GikpaXh119/RUVFBfLz8wG8TNzkcjlu3LiBmJgYDBo0CGZmZlCr1fjmm2/g7OwMd3d3YX6bN2+Gm5sbDA0NkZycjKCgIISEhAgbuxMTE1FQUICePXvC0NAQarUa8+bNg7u7u/CAwY4dOyCTyeDs7AwdHR0cOHAAGzduRGhoqHCeoUOHYsaMGfjhhx/g7e2NvLw8BAQEoFevXrCysgIAlJaWQq1WC3++e/cuLly4AENDQyHxHDp0KFauXAlra2t07twZ6enpCAsLw9SpUwEAHTp0gJ+fHyZOnIi1a9fC2dkZhYWFOHr0KBwdHTFo0CAUFhZiz5498PDwwIsXLxAVFYX4+HgcP368fm9une8xMcbYG1bfDdmaqnv0WfP2fnh4ODVv3vy1ezs0VVRUkLGxMe3bt6/GNuXl5dS6dWtau3atUHb48GGSyWRaX8+wt09tG7KXLVtW7Y+X0GxvY2NDy5YtE/U7cOAAdenShfT09MjBwYHCw8OFuqpH56t7VT2yfufOHfr000/J1NSU5HI5ffTRRzRnzhy6f/++6DwTJkwQ2nTt2pV27twpqj969Ci5urpSs2bNSKFQUPv27Wn+/Pmirwejo6OpY8eOpK+vT0ZGRtS9e3etzdhELx/d79SpEymVSrK0tCQ/Pz/Kycl57XX17dtXaPPkyROaO3cuWVtbk0KhIDs7O1q0aJFoz19paSktXbqUbG1tSSaTUatWrWjEiBGUkZFBRC83kLu4uJCBgQHp6+uTp6enaE9gXUmI6nHPizHG3qDo6GgEBARo/STi4OBgJCQk4MKFCwCAyZMn49GjR0hISBDaeHh4wMnJSfSTeW1tbREQEICAgAAUFxfDyckJrVu3xvLly6FSqXDnzh3s3bsXQUFBUKlU1c5p1KhRaNu2Lb7//nsAQEpKCo4cOQIvLy+Ym5
sjJSUF/v7+SEhIwMCBA4X5njx5EkeOHGm02DDGGg//nCPGGAOgr6+PEydOwNraGiNHjkTHjh0xdepUPH/+HMbGxjX2mzlzJuLi4oSv54yNjXHixAkMGjQIHTp0wOLFi7F27VohMQJe7oGYMWNGk18TY6xh+M4RY4z9C0QEFxcXBAQEYPz48a9t/9tvvyEoKAgZGRlam1gZY28HvnPEGGP/gkQiQXh4OMrLy+vU/tmzZ4iKiuLEiLG3GN85YowxxhjTwHeOGGOMMcY0cHLEGGOMMaaBkyPGGGOMMQ2cHDHGGGOMaeDkiDHGGGNMAydHjDHGGGMaODlijDHGGNPAyRFjjDHGmAZOjhhjjDHGNPwfurVvtDyzdKEAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "ripple_band_df = (lfp_band.LFPBandV1() & lfp_band_key).fetch1_dataframe()\n", + "\n", + "window = 0.1\n", + "i = -1\n", + "ripple_start = ripple_times.iloc[i].start_time\n", + "ripple_end = ripple_times.iloc[i].end_time\n", + "plt.plot(\n", + " ripple_band_df.loc[ripple_start - window : ripple_end + window].index,\n", + " ripple_band_df.loc[ripple_start - window : ripple_end + window].iloc[\n", + " :, ::15\n", + " ],\n", + ")\n", + "plt.axvline(ripple_start, color=\"r\")\n", + "plt.axvline(ripple_end, color=\"r\")\n", + "\n", + "plt.xlabel(\"Time (s)\")\n", + "plt.ylabel(\"Voltage (uV)\")" + ] + }, { "cell_type": "markdown", "id": "bead0afa", diff --git a/notebooks/py_scripts/03_Merge_Tables.py b/notebooks/py_scripts/03_Merge_Tables.py index 33b8e9a0e..ac3ad4e69 100644 --- a/notebooks/py_scripts/03_Merge_Tables.py +++ b/notebooks/py_scripts/03_Merge_Tables.py @@ -5,7 +5,7 @@ # extension: .py # format_name: light # format_version: '1.5' -# jupytext_version: 1.16.0 +# jupytext_version: 1.15.2 # kernelspec: # display_name: spy # language: python @@ -137,17 +137,17 @@ # by referencing this or other features. # -uuid_key = LFPOutput.fetch(limit=1, as_dict=True)[-1] +uuid_key = (LFPOutput & nwb_file_dict).fetch(limit=1, as_dict=True)[-1] restrict = LFPOutput & uuid_key restrict -result1 = restrict.fetch_nwb() +result1 = restrict.fetch_nwb(restrict.fetch1("KEY")) result1 nwb_key = LFPOutput.merge_restrict(nwb_file_dict).fetch(as_dict=True)[0] nwb_key -result2 = (LFPOutput & nwb_key).fetch_nwb() +result2 = LFPOutput().fetch_nwb(nwb_key) result2 == result1 # ## Selecting data diff --git a/notebooks/py_scripts/10_Spike_SortingV0.py b/notebooks/py_scripts/10_Spike_SortingV0.py index 6daa28200..71a70b626 100644 --- a/notebooks/py_scripts/10_Spike_SortingV0.py +++ b/notebooks/py_scripts/10_Spike_SortingV0.py @@ -5,7 +5,7 @@ # extension: .py # format_name: light # format_version: '1.5' -# jupytext_version: 1.16.0 +# jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3.10.5 64-bit # language: python @@ -70,6 +70,7 @@ import spyglass.common as sgc import spyglass.spikesorting.v0 as sgs +from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput # ignore datajoint+jupyter async warnings import warnings @@ -101,7 +102,10 @@ ], skip_duplicates=True, ) -sgc.LabMember.LabMemberInfo() +sgc.LabMember.LabMemberInfo() & { + "team_name": "My Team", + "lab_member_name": "Firstname Lastname", +} # We can try `fetch` to confirm. # @@ -157,7 +161,8 @@ # commonly use multiple electrodes in a `SortGroup` selected by what tetrode or # shank of a probe they were on. # -# _Note:_ This will delete any existing entries. Answer 'yes' when prompted. +# _Note:_ This will delete any existing entries. Answer 'yes' when prompted, or skip +# running this cell to leave data in place. # sgs.SortGroup().set_group_by_shank(nwb_file_name) @@ -332,7 +337,7 @@ def print_interval_duration(interval_list: np.ndarray): # into `ArtifactDetectionSelection`. 
# -sgs.ArtifactDetectionSelection().insert1(artifact_key) +sgs.ArtifactDetectionSelection().insert1(artifact_key, skip_duplicates=True) sgs.ArtifactDetectionSelection() & artifact_key # Then, we can populate `ArtifactDetection`, which will find periods where there @@ -437,15 +442,198 @@ def print_interval_duration(interval_list: np.ndarray): # # [(sgs.SpikeSortingSelection & ss_key).proj()] -sgs.SpikeSorting.populate() +sgs.SpikeSorting.populate(ss_key) # #### Check to make sure the table populated # sgs.SpikeSorting() & ss_key -# ## Next Steps +# ## Automatic Curation +# +# Spikesorting algorithms can sometimes identify noise or other undesired features as spiking units. +# Spyglass provides a curation pipeline to detect and label such features to exclude them +# from downstream analysis. +# +# + +# ### Initial Curation +# +# The `Curation` table keeps track of rounds of spikesorting curations in the spikesorting v0 pipeline. +# Before we begin, we first insert an initial curation entry with the spiking results. + +# + +for sorting_key in (sgs.SpikeSorting() & ss_key).fetch("KEY"): + # insert_curation will make an entry with a new curation_id regardless of whether it already exists + # to avoid this, we check if the curation already exists + if not (sgs.Curation() & sorting_key): + sgs.Curation.insert_curation(sorting_key) + +sgs.Curation() & ss_key +# - + +# ### Waveform Extraction +# +# Some metrics used for curating units are dependent on features of the spike waveform. +# We extract these for each unit's initial curation here + +# Parameters used for waveform extraction from the recording +waveform_params_name = "default_whitened" +( + sgs.WaveformParameters() & {"waveform_params_name": waveform_params_name} +).fetch(as_dict=True)[0] + +# extract waveforms +curation_keys = [ + {**k, "waveform_params_name": waveform_params_name} + for k in (sgs.Curation() & ss_key & {"curation_id": 0}).fetch("KEY") +] +sgs.WaveformSelection.insert(curation_keys, skip_duplicates=True) +sgs.Waveforms.populate(ss_key) + +# ### Quality Metrics +# +# With these waveforms, we can calculate the metrics used to determine the quality of each unit. 
+ +# parameters which define what quality metrics are calculated and how +metric_params_name = "franklab_default3" +(sgs.MetricParameters() & {"metric_params_name": metric_params_name}).fetch( + "metric_params" +)[0] + +waveform_keys = [ + {**k, "metric_params_name": metric_params_name} + for k in (sgs.Waveforms() & ss_key).fetch("KEY") +] +sgs.MetricSelection.insert(waveform_keys, skip_duplicates=True) +sgs.QualityMetrics().populate(ss_key) +sgs.QualityMetrics() & ss_key + +# Look at the quality metrics for the first curation +(sgs.QualityMetrics() & ss_key).fetch_nwb()[0]["object_id"] + +# ### Automatic Curation Labeling +# +# With these metrics, we can assign labels to the sorted units using the `AutomaticCuration` table + +# We can select our criteria for unit labeling here +auto_curation_params_name = "default" +( + sgs.AutomaticCurationParameters() + & {"auto_curation_params_name": auto_curation_params_name} +).fetch1() + +# We can now apply the automatic curation criteria to the quality metrics +metric_keys = [ + {**k, "auto_curation_params_name": auto_curation_params_name} + for k in (sgs.QualityMetrics() & ss_key).fetch("KEY") +] +sgs.AutomaticCurationSelection.insert(metric_keys, skip_duplicates=True) +# populating this table will make a new entry in the curation table +sgs.AutomaticCuration().populate(ss_key) +sgs.Curation() & ss_key + +# ### Insert desired curation into downstream and merge tables for future analysis +# +# Now that we've performed auto-curation, we can insert the results of our chosen curation into +# `CuratedSpikeSorting` (the final table of this pipeline), and the merge table `SpikeSortingOutput`. +# Downstream analyses such as decoding will access the spiking data from there + +# + +# get the curation keys corresponding to the automatic curation +auto_curation_key_list = (sgs.AutomaticCuration() & ss_key).fetch( + "auto_curation_key" +) + +# insert into CuratedSpikeSorting +for auto_key in auto_curation_key_list: + # get the full key information needed + curation_auto_key = (sgs.Curation() & auto_key).fetch1("KEY") + sgs.CuratedSpikeSortingSelection.insert1( + curation_auto_key, skip_duplicates=True + ) +sgs.CuratedSpikeSorting.populate(ss_key) + +# Add the curated spike sorting to the SpikeSortingOutput merge table +keys_for_merge_tables = ( + sgs.CuratedSpikeSorting & auto_curation_key_list +).fetch("KEY") +SpikeSortingOutput.insert( + keys_for_merge_tables, + skip_duplicates=True, + part_name="CuratedSpikeSorting", +) +# Here's our result! +SpikeSortingOutput.CuratedSpikeSorting() & ss_key +# - + +# ## Manual Curation with figurl + +# As of June 2021, members of the Frank Lab can use the `sortingview` web app for +# manual curation. To make use of this, we need to populate the `CurationFigurl` table. +# +# We begin by selecting a starting point from the curation entries. 
In this case we will use +# the AutomaticCuration populated above as a starting point for manual curation, though you could also +# start from the opriginal curation entry by selecting the proper key from the `Curation` table +# +# _Note_: This step requires setting up your kachery sharing through the [sharing notebook](02_Data_Sync.ipynb) +# +# + +# + +starting_curations = (sgs.AutomaticCuration() & ss_key).fetch( + "auto_curation_key" +) # you could also select any key from the sgs.Curation table here + +username = "username" +fig_url_repo = f"gh://LorenFrankLab/sorting-curations/main/{username}/" # settings for franklab members + +sort_interval_name = interval_list_name +gh_url = ( + fig_url_repo + + str(nwb_file_name + "_" + sort_interval_name) # session id + + "/{}" # tetrode using auto_id['sort_group_id'] + + "/curation.json" +) # url where the curation is stored + +for auto_id in starting_curations: + auto_curation_out_key = dict( + **(sgs.Curation() & auto_id).fetch1("KEY"), + new_curation_uri=gh_url.format(str(auto_id["sort_group_id"])), + ) + sgs.CurationFigurlSelection.insert1( + auto_curation_out_key, skip_duplicates=True + ) + sgs.CurationFigurl.populate(auto_curation_out_key) +# - + +# We can then access the url for the curation figurl like so: + +print((sgs.CurationFigurl & ss_key).fetch("url")[0]) + +# This will take you to a workspace on the `sortingview` app. The workspace, which +# you can think of as a list of recording and associated sorting objects, was +# created at the end of spike sorting. On the workspace view, you will see a set +# of recordings that have been added to the workspace. +# +# ![Workspace view](./../notebook-images/workspace.png) +# +# Clicking on a recording then takes you to a page that gives you information +# about the recording as well as the associated sorting objects. +# +# ![Recording view](./../notebook-images/recording.png) +# +# Click on a sorting to see the curation view. Try exploring the many +# visualization widgets. +# +# ![Unit table](./../notebook-images/unittable.png) +# +# The most important is the `Units Table` and the `Curation` menu, which allows +# you to give labels to the units. The curation labels will persist even if you +# suddenly lose connection to the app; this is because the curation actions are +# appended to the workspace as soon as they are created. Note that if you are not +# logged in with your Google account, `Curation` menu may not be visible. Log in +# and refresh the page to access this feature. # -# Congratulations, you've spike sorted! See our -# [next notebook](./03_Curation.ipynb) for curation steps. 
+# ![Curation](./../notebook-images/curation.png) # diff --git a/notebooks/py_scripts/32_Ripple_Detection.py b/notebooks/py_scripts/32_Ripple_Detection.py index a2b6be30d..9bd360ec3 100644 --- a/notebooks/py_scripts/32_Ripple_Detection.py +++ b/notebooks/py_scripts/32_Ripple_Detection.py @@ -5,7 +5,7 @@ # extension: .py # format_name: light # format_version: '1.5' -# jupytext_version: 1.16.0 +# jupytext_version: 1.15.2 # kernelspec: # display_name: spyglass # language: python @@ -39,6 +39,7 @@ import os import datajoint as dj import numpy as np +import pandas as pd # change to the upper level folder to detect dj_local_conf.json if os.path.basename(os.getcwd()) == "notebooks": @@ -46,11 +47,10 @@ dj.config.load("dj_local_conf.json") # load config for database connection info import spyglass.common as sgc -import spyglass.position as sgp -import spyglass.lfp as lfp +import spyglass.position.v1 as sgp import spyglass.lfp.analysis.v1 as lfp_analysis from spyglass.lfp import LFPOutput -import spyglass.lfp.v1 as sglfp +import spyglass.lfp as sglfp from spyglass.position import PositionOutput import spyglass.ripple.v1 as sgrip import spyglass.ripple.v1 as sgr @@ -62,10 +62,126 @@ warnings.simplefilter("ignore", category=ResourceWarning) # - -# ## Selecting Electrodes +# ## Generate LFP Ripple Band + +# First, we need to generate a filter band from the LFP data at the ripple frequency. This process is analogous to that in [30_LFP.ipynb](31_Theta.ipynb). + +# #### Make LFP + +# If you have already populated the LFP table for your data you may skip this step. Here, we will begin by creating a lfp group of just electrodes in the hippocampus region, and populate the lfp on a subsetted interval: + +# + +nwb_file_name = "mediumnwb20230802_.nwb" +lfp_electrode_group_name = "test_hippocampus" +interval_list_name = "02_r1_ripple_demo" + +# select hippocampus electrodes +electrodes_df = ( + pd.DataFrame( + ( + sgc.Electrode + & { + "nwb_file_name": nwb_file_name, + } + ) + * (sgc.BrainRegion & {"region_name": "hippocampus"}) + ) + .loc[:, ["nwb_file_name", "electrode_id", "region_name"]] + .sort_values(by="electrode_id") +) +# create lfp_electrode_group +lfp_eg_key = { + "nwb_file_name": nwb_file_name, + "lfp_electrode_group_name": lfp_electrode_group_name, +} +sglfp.lfp_electrode.LFPElectrodeGroup.create_lfp_electrode_group( + nwb_file_name=nwb_file_name, + group_name=lfp_electrode_group_name, + electrode_list=electrodes_df.electrode_id.tolist(), +) + +# make a shorter interval to run this demo on +interval_start = ( + sgc.IntervalList + & {"nwb_file_name": nwb_file_name, "interval_list_name": "02_r1"} +).fetch1("valid_times")[0][0] +truncated_interval = np.array( + [[interval_start, interval_start + 120]] +) # first 2 minutes of epoch +sgc.IntervalList.insert1( + { + "nwb_file_name": nwb_file_name, + "interval_list_name": "02_r1_ripple_demo", + "valid_times": truncated_interval, + }, + skip_duplicates=True, +) + +# make the lfp selection +lfp_s_key = lfp_eg_key.copy() +lfp_s_key.update( + { + "target_interval_list_name": interval_list_name, + "filter_name": "LFP 0-400 Hz", + "filter_sampling_rate": 30_000, # sampling rate of the data (Hz) + "target_sampling_rate": 1_000, # smpling rate of the lfp output (Hz) + } +) +sglfp.v1.LFPSelection.insert1(lfp_s_key, skip_duplicates=True) + +# populate the lfp +sglfp.v1.LFPV1.populate(lfp_s_key, display_progress=True) +sglfp.v1.LFPV1 & lfp_s_key +# - + +# #### Populate Ripple Band +# We now create a filter for this frequency band + +# + 
+sgc.FirFilterParameters().add_filter( + filter_name="Ripple 150-250 Hz", + fs=1000.0, + filter_type="bandpass", + band_edges=[140, 150, 250, 260], + comments="ripple band filter for 1 kHz data", +) + +sgc.FirFilterParameters() & "filter_name='Ripple 150-250 Hz'" +# - + +# We can then populate the ripple band + +# + +from spyglass.lfp.analysis.v1 import lfp_band + +filter_name = "Ripple 150-250 Hz" +lfp_band_electrode_ids = ( + electrodes_df.electrode_id.tolist() +) # assumes we've filtered these electrodes +lfp_band_sampling_rate = 1000 # desired sampling rate + +lfp_merge_id = (LFPOutput.LFPV1() & lfp_s_key).fetch1("merge_id") +lfp_band.LFPBandSelection().set_lfp_band_electrodes( + nwb_file_name=nwb_file_name, + lfp_merge_id=lfp_merge_id, + electrode_list=lfp_band_electrode_ids, + filter_name=filter_name, + interval_list_name=interval_list_name, + reference_electrode_list=[-1], # -1 means no ref electrode for all channels + lfp_band_sampling_rate=lfp_band_sampling_rate, +) + +lfp_band.LFPBandV1.populate( + {"lfp_merge_id": lfp_merge_id, "filter_name": filter_name}, + display_progress=True, +) +lfp_band.LFPBandV1 & {"lfp_merge_id": lfp_merge_id, "filter_name": filter_name} +# - + +# ## Selecting Ripple Analysis Electrodes # -# First, we'll pick the electrodes on which we'll run ripple detection on, using +# Next, we'll pick the electrodes on which we'll run ripple detection on, using # `RippleLFPSelection.set_lfp_electrodes` # @@ -79,13 +195,6 @@ # `electrode_list`. # -nwb_file_name = "tonks20211103_.nwb" -interval_list_name = "test interval" -filter_name = "Ripple 150-250 Hz" -if not sgc.Session & {"nwb_file_name": nwb_file_name}: - # This error will be raised when notebooks auto-run with 'minirec' - raise ValueError(f"Session with nwb_file_name={nwb_file_name} not found") - # Now we can look at `electrode_id` in the `Electrode` table: # @@ -165,12 +274,12 @@ # ## Setting Ripple Parameters # -sgr.RippleParameters() +sgr.RippleParameters().insert_default # Here are the default ripple parameters: # -(sgrip.RippleParameters() & {"ripple_param_name": "default"}).fetch1() +(sgrip.RippleParameters() & {"ripple_param_name": "default_trodes"}).fetch1() # - `filter_name`: which bandpass filter is used # - `speed_name`: the name of the speed parameters in `IntervalPositionInfo` @@ -188,19 +297,26 @@ # ## Check interval speed # # The speed for this interval should exist under the default position parameter -# set and for a given interval. +# set and for a given interval. We can quickly populate this here # -pos_key = sgp.PositionOutput.merge_get_part( - { - "nwb_file_name": nwb_file_name, - "position_info_param_name": "default", - "interval_list_name": "pos 1 valid times", - } -).fetch1("KEY") -(sgp.PositionOutput & pos_key).fetch1_dataframe() +pos_key = { + "nwb_file_name": nwb_file_name, + "trodes_pos_params_name": "single_led", + "interval_list_name": "pos 0 valid times", +} +sgp.TrodesPosSelection().insert1(pos_key, skip_duplicates=True) +sgp.TrodesPosV1.populate(pos_key, display_progress=True) +sgp.TrodesPosV1 & pos_key + +# + +from spyglass.position import PositionOutput -# We'll use the `head_speed` above as part of `RippleParameters`. +pos_key = PositionOutput.merge_get_part(pos_key).fetch1("KEY") +(PositionOutput & pos_key).fetch1_dataframe() +# - + +# We'll use the `speed` above as part of `RippleParameters`. Ensure your selected ripple parameters value for `speed_name` matches for your data. 
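# A quick sanity check before detection (minimal sketch, assuming the
# `RippleParameters` and `PositionOutput` entries referenced above are already
# populated; the "default_trodes" name and `pos_key` come from the preceding
# cells): confirm that the parameter set's `speed_name` is a column of the
# position dataframe, so ripple detection can find the speed signal.
ripple_param_dict = (
    sgrip.RippleParameters() & {"ripple_param_name": "default_trodes"}
).fetch1("ripple_param_dict")
speed_col = ripple_param_dict["speed_name"]
pos_df = (PositionOutput & pos_key).fetch1_dataframe()
if speed_col not in pos_df.columns:
    raise ValueError(
        f"speed_name '{speed_col}' not in position columns: {list(pos_df.columns)}"
    )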
# # ## Run Ripple Detection @@ -210,7 +326,7 @@ # key = { - "ripple_param_name": "default", + "ripple_param_name": "default_trodes", **rip_sel_key, "pos_merge_id": pos_key["merge_id"], } @@ -222,6 +338,32 @@ ripple_times = (sgrip.RippleTimesV1() & key).fetch1_dataframe() ripple_times +# We can also inspect the lfp trace at these ripple times. +# +# * *Note: The ripple detection algorithm depends on estimates of the standard deviation of power in the ripple band. Running analysis on longer intervals will lead to better estimates of this value, and thereby better segmentation of ripple events* + +# + +import matplotlib.pyplot as plt + +ripple_band_df = (lfp_band.LFPBandV1() & lfp_band_key).fetch1_dataframe() + +window = 0.1 +i = -1 +ripple_start = ripple_times.iloc[i].start_time +ripple_end = ripple_times.iloc[i].end_time +plt.plot( + ripple_band_df.loc[ripple_start - window : ripple_end + window].index, + ripple_band_df.loc[ripple_start - window : ripple_end + window].iloc[ + :, ::15 + ], +) +plt.axvline(ripple_start, color="r") +plt.axvline(ripple_end, color="r") + +plt.xlabel("Time (s)") +plt.ylabel("Voltage (uV)") +# - + # ## Up Next # # Next, we'll [extract mark indicator](./31_Extract_Mark_Indicators.ipynb). diff --git a/src/spyglass/ripple/v1/ripple.py b/src/spyglass/ripple/v1/ripple.py index 336e07799..563b220d8 100644 --- a/src/spyglass/ripple/v1/ripple.py +++ b/src/spyglass/ripple/v1/ripple.py @@ -144,6 +144,24 @@ def insert_default(self): {"ripple_param_name": "default", "ripple_param_dict": default_dict}, skip_duplicates=True, ) + default_dict_trodes = { + "speed_name": "speed", + "ripple_detection_algorithm": "Kay_ripple_detector", + "ripple_detection_params": dict( + speed_threshold=4.0, # cm/s + minimum_duration=0.015, # sec + zscore_threshold=2.0, # std + smoothing_sigma=0.004, # sec + close_ripple_threshold=0.0, # sec + ), + } + self.insert1( + { + "ripple_param_name": "default_trodes", + "ripple_param_dict": default_dict_trodes, + }, + skip_duplicates=True, + ) @schema @@ -373,7 +391,6 @@ def create_figurl( lfp_offset=1, lfp_channel_ind=None, ): - ripple_times = self.fetch1_dataframe() def _add_ripple_times( From 440601f9d9658ce6dbc5d283af79a6136e3db606 Mon Sep 17 00:00:00 2001 From: Eric Denovellis Date: Wed, 20 Mar 2024 10:18:49 -0700 Subject: [PATCH 03/60] Remove curation notebook This is now redundant with the SpikesortingV0 tutorial --- docs/mkdocs.yml | 1 - notebooks/11_CurationV0.ipynb | 321 -------------------------- notebooks/py_scripts/11_CurationV0.py | 112 --------- 3 files changed, 434 deletions(-) delete mode 100644 notebooks/11_CurationV0.ipynb delete mode 100644 notebooks/py_scripts/11_CurationV0.py diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 5eb02d0b6..996cb36dc 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -54,7 +54,6 @@ nav: - Spikes: - Spike Sorting V0: notebooks/10_Spike_SortingV0.ipynb - Spike Sorting V1: notebooks/10_Spike_SortingV1.ipynb - - Curation: notebooks/11_CurationV0.ipynb - Position: - Position Trodes: notebooks/20_Position_Trodes.ipynb - DLC Models: notebooks/21_DLC.ipynb diff --git a/notebooks/11_CurationV0.ipynb b/notebooks/11_CurationV0.ipynb deleted file mode 100644 index b8f99e430..000000000 --- a/notebooks/11_CurationV0.ipynb +++ /dev/null @@ -1,321 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Curation\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Overview\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, 
- "source": [ - "_Developer Note:_ if you may make a PR in the future, be sure to copy this\n", - "notebook, and use the `gitignore` prefix `temp` to avoid future conflicts.\n", - "\n", - "This is one notebook in a multi-part series on Spyglass.\n", - "\n", - "- To set up your Spyglass environment and database, see\n", - " [this notebook](./00_Setup.ipynb)\n", - "- For a more detailed introduction to DataJoint with inserts, see\n", - " [this notebook](./01_Insert_Data.ipynb)\n", - "- [The Spike Sorting notebook](./10_Spike_SortingV0.ipynb) is a mandatory\n", - " prerequisite to Curation.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Imports\n" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[2023-07-21 13:55:48,211][INFO]: Connecting root@localhost:3306\n", - "[2023-07-21 13:55:48,242][INFO]: Connected root@localhost:3306\n", - "/home/cb/miniconda3/envs/spy/lib/python3.9/site-packages/spikeinterface/sortingcomponents/peak_detection.py:643: NumbaDeprecationWarning: \u001b[1mThe 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\u001b[0m\n", - " @numba.jit(parallel=False)\n", - "/home/cb/miniconda3/envs/spy/lib/python3.9/site-packages/spikeinterface/sortingcomponents/peak_detection.py:668: NumbaDeprecationWarning: \u001b[1mThe 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\u001b[0m\n", - " @numba.jit(parallel=False)\n" - ] - } - ], - "source": [ - "import os\n", - "import warnings\n", - "import datajoint as dj\n", - "\n", - "warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n", - "warnings.simplefilter(\"ignore\", category=ResourceWarning)\n", - "\n", - "# change to the upper level folder to detect dj_local_conf.json\n", - "if os.path.basename(os.getcwd()) == \"notebooks\":\n", - " os.chdir(\"..\")\n", - "dj.config.load(\"dj_local_conf.json\") # load config for database connection info\n", - "\n", - "from spyglass.spikesorting.v0 import SpikeSorting" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Spikes Sorted\n", - "\n", - "Let's check that the sorting was successful in the previous notebook.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
       [... HTML table markup omitted: DataJoint repr of one SpikeSorting entry with columns nwb_file_name (name of the NWB file), sort_group_id (identifier for a group of electrodes), sort_interval_name, preproc_params_name, team_name, sorter, sorter_params_name, artifact_removed_interval_list_name, sorting_path, time_of_sort (in Unix time, to the nearest second); Total: 1. The same entry appears in the text/plain repr that follows ...]
\n", - " " - ], - "text/plain": [ - "*nwb_file_name *sort_group_id *sort_interval *preproc_param *team_name *sorter *sorter_params *artifact_remo sorting_path time_of_sort \n", - "+------------+ +------------+ +------------+ +------------+ +-----------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", - "minirec2023062 0 01_s1_first9 default_hippoc My Team mountainsort4 hippocampus_tu minirec2023062 /home/cb/wrk/z 1689971050 \n", - " (Total: 1)" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Define the name of the file that you copied and renamed from previous tutorials\n", - "nwb_file_name = \"minirec20230622.nwb\"\n", - "nwb_copy_file_name = \"minirec20230622_.nwb\"\n", - "SpikeSorting & {\"nwb_file_name\": nwb_copy_file_name}" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## `sortingview` web app\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As of June 2021, members of the Frank Lab can use the `sortingview` web app for\n", - "manual curation.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# ERROR: curation_feed_uri not a field in SpikeSorting" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "workspace_uri = (SpikeSorting & {\"nwb_file_name\": nwb_copy_file_name}).fetch1(\n", - " \"curation_feed_uri\"\n", - ")\n", - "print(\n", - " f\"https://sortingview.vercel.app/workspace?workspace={workspace_uri}&channel=franklab\"\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This will take you to a workspace on the `sortingview` app. The workspace, which\n", - "you can think of as a list of recording and associated sorting objects, was\n", - "created at the end of spike sorting. On the workspace view, you will see a set\n", - "of recordings that have been added to the workspace.\n", - "\n", - "![Workspace view](./../notebook-images/workspace.png)\n", - "\n", - "Clicking on a recording then takes you to a page that gives you information\n", - "about the recording as well as the associated sorting objects.\n", - "\n", - "![Recording view](./../notebook-images/recording.png)\n", - "\n", - "Click on a sorting to see the curation view. Try exploring the many\n", - "visualization widgets.\n", - "\n", - "![Unit table](./../notebook-images/unittable.png)\n", - "\n", - "The most important is the `Units Table` and the `Curation` menu, which allows\n", - "you to give labels to the units. The curation labels will persist even if you\n", - "suddenly lose connection to the app; this is because the curation actions are\n", - "appended to the workspace as soon as they are created. Note that if you are not\n", - "logged in with your Google account, `Curation` menu may not be visible. 
Log in\n", - "and refresh the page to access this feature.\n", - "\n", - "![Curation](./../notebook-images/curation.png)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Up Next\n", - "\n", - "Next, we'll turn our attention to [LFP data](./12_LFP.ipynb) data.\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - }, - "vscode": { - "interpreter": { - "hash": "660bf1f5e0ab56266266a9ce589faf8a830d2aef7f15e27b16e9135d893e3d0b" - } - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/notebooks/py_scripts/11_CurationV0.py b/notebooks/py_scripts/11_CurationV0.py deleted file mode 100644 index 80e8cdc16..000000000 --- a/notebooks/py_scripts/11_CurationV0.py +++ /dev/null @@ -1,112 +0,0 @@ -# --- -# jupyter: -# jupytext: -# text_representation: -# extension: .py -# format_name: light -# format_version: '1.5' -# jupytext_version: 1.16.0 -# kernelspec: -# display_name: base -# language: python -# name: python3 -# --- - -# # Curation -# - -# ## Overview -# - -# _Developer Note:_ if you may make a PR in the future, be sure to copy this -# notebook, and use the `gitignore` prefix `temp` to avoid future conflicts. -# -# This is one notebook in a multi-part series on Spyglass. -# -# - To set up your Spyglass environment and database, see -# [this notebook](./00_Setup.ipynb) -# - For a more detailed introduction to DataJoint with inserts, see -# [this notebook](./01_Insert_Data.ipynb) -# - [The Spike Sorting notebook](./10_Spike_SortingV0.ipynb) is a mandatory -# prerequisite to Curation. -# - -# ## Imports -# - -# + -import os -import warnings -import datajoint as dj - -warnings.simplefilter("ignore", category=DeprecationWarning) -warnings.simplefilter("ignore", category=ResourceWarning) - -# change to the upper level folder to detect dj_local_conf.json -if os.path.basename(os.getcwd()) == "notebooks": - os.chdir("..") -dj.config.load("dj_local_conf.json") # load config for database connection info - -from spyglass.spikesorting.v0 import SpikeSorting - -# - - -# ## Spikes Sorted -# -# Let's check that the sorting was successful in the previous notebook. -# - -# Define the name of the file that you copied and renamed from previous tutorials -nwb_file_name = "minirec20230622.nwb" -nwb_copy_file_name = "minirec20230622_.nwb" -SpikeSorting & {"nwb_file_name": nwb_copy_file_name} - -# ## `sortingview` web app -# - -# As of June 2021, members of the Frank Lab can use the `sortingview` web app for -# manual curation. -# - -# + -# ERROR: curation_feed_uri not a field in SpikeSorting -# - - -workspace_uri = (SpikeSorting & {"nwb_file_name": nwb_copy_file_name}).fetch1( - "curation_feed_uri" -) -print( - f"https://sortingview.vercel.app/workspace?workspace={workspace_uri}&channel=franklab" -) - -# This will take you to a workspace on the `sortingview` app. The workspace, which -# you can think of as a list of recording and associated sorting objects, was -# created at the end of spike sorting. On the workspace view, you will see a set -# of recordings that have been added to the workspace. 
-# -# ![Workspace view](./../notebook-images/workspace.png) -# -# Clicking on a recording then takes you to a page that gives you information -# about the recording as well as the associated sorting objects. -# -# ![Recording view](./../notebook-images/recording.png) -# -# Click on a sorting to see the curation view. Try exploring the many -# visualization widgets. -# -# ![Unit table](./../notebook-images/unittable.png) -# -# The most important is the `Units Table` and the `Curation` menu, which allows -# you to give labels to the units. The curation labels will persist even if you -# suddenly lose connection to the app; this is because the curation actions are -# appended to the workspace as soon as they are created. Note that if you are not -# logged in with your Google account, `Curation` menu may not be visible. Log in -# and refresh the page to access this feature. -# -# ![Curation](./../notebook-images/curation.png) -# - -# ## Up Next -# -# Next, we'll turn our attention to [LFP data](./12_LFP.ipynb) data. -# From b7a2986f3368ecc79651c3d3908a3b92062698b1 Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Thu, 21 Mar 2024 13:52:10 -0500 Subject: [PATCH 04/60] Refine query of upstream tables on FigURL populate (#871) * Refine query of upstream tables on FigURL populate * Update changelog * Typo --------- Co-authored-by: Eric Denovellis --- CHANGELOG.md | 1 + .../spikesorting/v1/figurl_curation.py | 39 ++++++++----------- 2 files changed, 18 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b1cf8ebc..9a59e87da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ - Spikesorting: - Increase`spikeinterface` version to >=0.99.1, <0.100 #852 - Bug fix in single artifact interval edge case #859 + - Bug fix in FigURL #871 - LFP - In LFPArtifactDetection, only apply referencing if explicitly selected #863 diff --git a/src/spyglass/spikesorting/v1/figurl_curation.py b/src/spyglass/spikesorting/v1/figurl_curation.py index f52d1a7ef..3aa480451 100644 --- a/src/spyglass/spikesorting/v1/figurl_curation.py +++ b/src/spyglass/spikesorting/v1/figurl_curation.py @@ -118,34 +118,29 @@ class FigURLCuration(SpyglassMixin, dj.Computed): def make(self, key: dict): # FETCH - sorting_analysis_file_name = ( - FigURLCurationSelection * CurationV1 & key - ).fetch1("analysis_file_name") - object_id = (FigURLCurationSelection * CurationV1 & key).fetch1( - "object_id" + query = ( + FigURLCurationSelection * CurationV1 * SpikeSortingSelection & key ) - recording_label = (SpikeSortingSelection & key).fetch1("recording_id") - metrics_figurl = (FigURLCurationSelection & key).fetch1( - "metrics_figurl" + ( + sorting_fname, + object_id, + recording_label, + metrics_figurl, + ) = query.fetch1( + "analysis_file_name", "object_id", "recording_id", "metrics_figurl" ) # DO - sorting_analysis_file_abs_path = AnalysisNwbfile.get_abs_path( - sorting_analysis_file_name - ) - recording = CurationV1.get_recording( - (FigURLCurationSelection & key).fetch1() - ) - sorting = CurationV1.get_sorting( - (FigURLCurationSelection & key).fetch1() - ) - sorting_label = (FigURLCurationSelection & key).fetch1("sorting_id") - curation_uri = (FigURLCurationSelection & key).fetch1("curation_uri") + sel_query = FigURLCurationSelection & key + sel_key = sel_query.fetch1() + sorting_fpath = AnalysisNwbfile.get_abs_path(sorting_fname) + recording = CurationV1.get_recording(sel_key) + sorting = CurationV1.get_sorting(sel_key) + sorting_label = sel_query.fetch1("sorting_id") + curation_uri = 
sel_query.fetch1("curation_uri") metric_dict = {} - with pynwb.NWBHDF5IO( - sorting_analysis_file_abs_path, "r", load_namespaces=True - ) as io: + with pynwb.NWBHDF5IO(sorting_fpath, "r", load_namespaces=True) as io: nwbf = io.read() nwb_sorting = nwbf.objects[object_id].to_dataframe() unit_ids = nwb_sorting.index From 3de6cdcc6989bbcef96f28dd803729238ed6100e Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Thu, 21 Mar 2024 11:53:08 -0700 Subject: [PATCH 05/60] Fix dlc video and kachery cloud config (#882) * Fix dlc video and kachery cloud config * Update CHANGELOG.md --------- Co-authored-by: Chris Brozdowski --- CHANGELOG.md | 1 + dj_local_conf_example.json | 2 +- src/spyglass/settings.py | 11 +++++++---- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a59e87da..b4a6406f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## [0.5.2] (Unreleased) - Refactor `TableChain` to include `_searched` attribute. #867 +- Fix errors in config import #882 ## [0.5.1] (March 7, 2024) diff --git a/dj_local_conf_example.json b/dj_local_conf_example.json index defdea4d7..b9b5e725e 100644 --- a/dj_local_conf_example.json +++ b/dj_local_conf_example.json @@ -41,7 +41,7 @@ "video": "/your/base/path/video" }, "kachery_dirs": { - "cloud": "/your/base/path/kachery_storage", + "cloud": "/your/base/path/.kachery-cloud", "storage": "/your/base/path/kachery_storage", "temp": "/your/base/path/tmp" }, diff --git a/src/spyglass/settings.py b/src/spyglass/settings.py index 202ac33fb..af16e688d 100644 --- a/src/spyglass/settings.py +++ b/src/spyglass/settings.py @@ -70,7 +70,7 @@ def __init__(self, base_dir: str = None, **kwargs): "video": "video", }, "kachery": { - "cloud": "kachery_storage", + "cloud": ".kachery-cloud", "storage": "kachery_storage", "temp": "tmp", }, @@ -181,10 +181,13 @@ def load_config( else None ) + source_config = ( + dj_dlc + if prefix == "dlc" + else dj_kachery if prefix == "kachery" else dj_spyglass + ) dir_location = ( - dj_spyglass.get(dir) - or dj_kachery.get(dir) - or dj_dlc.get(dir) + source_config.get(dir) or env_loc or str(Path(this_base) / dir_str) ).replace('"', "") From e858caeb95b9e451ccb4452544de127d84382d46 Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Thu, 21 Mar 2024 16:14:34 -0500 Subject: [PATCH 06/60] Edits for docs (#883) * Docs updates * Exclude docs branch from gh-wf --- .github/ISSUE_TEMPLATE/bug_report.md | 10 +++++++++- .github/workflows/lint.yml | 6 +++++- .github/workflows/publish-docs.yml | 2 +- .github/workflows/test-conda.yml | 1 + .github/workflows/test-package-build.yml | 1 + docs/build-docs.sh | 1 + notebooks/README.md | 14 +++++++++++--- .../position/v1/position_dlc_pose_estimation.py | 7 ++++--- 8 files changed, 33 insertions(+), 9 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index bfd339332..ea044c956 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -12,11 +12,20 @@ A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: + 1. This error is on file '....' at file path '....' 2. Click on '....' 3. Scroll down to '....' 4. See error +Error Stack + +```python +# Paste the error stack trace here +``` + + + **Expected behavior** A clear and concise description of what you expected to happen. @@ -25,4 +34,3 @@ If applicable, add screenshots to help explain your problem. **Additional context** Add any other context about the problem here. 
- diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index a75f17768..60dfd1693 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,6 +1,10 @@ name: Lint -on: [push, pull_request] +on: + push: + branches: + - '!documentation' + pull_request: jobs: black: diff --git a/.github/workflows/publish-docs.yml b/.github/workflows/publish-docs.yml index 8cabef999..3b39b877c 100644 --- a/.github/workflows/publish-docs.yml +++ b/.github/workflows/publish-docs.yml @@ -40,7 +40,7 @@ jobs: - name: Deploy run: | - FULL_VERSION=${{ github.ref }} + FULL_VERSION=${{ github.ref_name }} export MAJOR_VERSION=${FULL_VERSION:0:3} echo "OWNER: ${REPO_OWNER}. BUILD: ${MAJOR_VERSION}" bash ./docs/build-docs.sh push $REPO_OWNER diff --git a/.github/workflows/test-conda.yml b/.github/workflows/test-conda.yml index 56347a38e..6432b366e 100644 --- a/.github/workflows/test-conda.yml +++ b/.github/workflows/test-conda.yml @@ -4,6 +4,7 @@ on: push: branches: - '!test_branch' + - '!documentation' schedule: # once a day at midnight UTC - cron: '0 0 * * *' diff --git a/.github/workflows/test-package-build.yml b/.github/workflows/test-package-build.yml index 96254215f..0fb98e620 100644 --- a/.github/workflows/test-package-build.yml +++ b/.github/workflows/test-package-build.yml @@ -6,6 +6,7 @@ on: - master - maint/* - '!test_branch' + - '!documentation' tags: - "*" pull_request: diff --git a/docs/build-docs.sh b/docs/build-docs.sh index 74174e9ca..bb9fa154a 100755 --- a/docs/build-docs.sh +++ b/docs/build-docs.sh @@ -7,6 +7,7 @@ cp ./CHANGELOG.md ./docs/src/ cp ./LICENSE ./docs/src/LICENSE.md mkdir -p ./docs/src/notebooks +rm -r ./docs/src/notebooks/* cp ./notebooks/*ipynb ./docs/src/notebooks/ cp ./notebooks/*md ./docs/src/notebooks/ mv ./docs/src/notebooks/README.md ./docs/src/notebooks/index.md diff --git a/notebooks/README.md b/notebooks/README.md index e5d540c06..aec471d19 100644 --- a/notebooks/README.md +++ b/notebooks/README.md @@ -55,14 +55,14 @@ Decoding can be from sorted or from unsorted data using spike waveform features (so-called clusterless decoding). The first notebook -([Extracting Clusterless Waveform Features](./41_Extracting_Clusterless_Waveform_Features.ipynb)) +([Extracting Clusterless Waveform Features](./40_Extracting_Clusterless_Waveform_Features.ipynb)) in this series shows how to retrieve the spike waveform features used for clusterless decoding. -The second notebook ([Clusterless Decoding](./42_Decoding_Clusterless.ipynb)) +The second notebook ([Clusterless Decoding](./41_Decoding_Clusterless.ipynb)) shows a detailed example of how to decode the position of the animal from the spike waveform features. The third notebook -([Decoding](./43_Decoding_SortedSpikes.ipynb)) shows how to decode the position +([Decoding](./42_Decoding_SortedSpikes.ipynb)) shows how to decode the position of the animal from the sorted spikes. ## Developer note @@ -79,3 +79,11 @@ black . ``` Unfortunately, jupytext-generated py script are not black-compliant by default. + +You can ensure black compliance with the `pre-commit` hook by running + +```bash +pip install pre-commit +``` + +This will run black whenever you commit changes to the repository. 
diff --git a/src/spyglass/position/v1/position_dlc_pose_estimation.py b/src/spyglass/position/v1/position_dlc_pose_estimation.py index b6d058176..3a4f50eba 100644 --- a/src/spyglass/position/v1/position_dlc_pose_estimation.py +++ b/src/spyglass/position/v1/position_dlc_pose_estimation.py @@ -86,9 +86,10 @@ def insert_estimation_task( Parameters ---------- - key: DataJoint key specifying a pairing of VideoRecording and Model. - task_mode (bool): Default 'trigger' computation. - Or 'load' existing results. + key: dict + DataJoint key specifying a pairing of VideoRecording and Model. + task_mode: bool, optional + Default 'trigger' computation. Or 'load' existing results. params (dict): Optional. Parameters passed to DLC's analyze_videos: videotype, gputouse, save_as_csv, batchsize, cropping, TFGPUinference, dynamic, robust_nframes, allow_growth, use_shelve From c94335d0fa5ea43f197d267c0c2be46bb171e78b Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Fri, 22 Mar 2024 13:12:00 -0700 Subject: [PATCH 07/60] Hhmi notebook check (#888) * reduce demo data size, insert parameter sets into new database * Update spikesort v0 notebook to run on new database * Update spikesort v1 notebook * fix spelling --- notebooks/10_Spike_SortingV0.ipynb | 25 +- notebooks/10_Spike_SortingV1.ipynb | 522 +++++++++----------- notebooks/32_Ripple_Detection.ipynb | 63 ++- notebooks/py_scripts/10_Spike_SortingV0.py | 27 +- notebooks/py_scripts/10_Spike_SortingV1.py | 157 +++--- notebooks/py_scripts/32_Ripple_Detection.py | 63 ++- 6 files changed, 481 insertions(+), 376 deletions(-) diff --git a/notebooks/10_Spike_SortingV0.ipynb b/notebooks/10_Spike_SortingV0.ipynb index ca2bf1622..d376db4b3 100644 --- a/notebooks/10_Spike_SortingV0.ipynb +++ b/notebooks/10_Spike_SortingV0.ipynb @@ -208,9 +208,9 @@ "source": [ "# Full name, Google email address, DataJoint username, admin\n", "name, email, dj_user, admin = (\n", - " \"Firstname Lastname\",\n", - " \"example@gmail.com\",\n", - " \"user\",\n", + " \"Firstname_spikesv0 Lastname_spikesv0\",\n", + " \"example_spikesv0@gmail.com\",\n", + " dj.config[\"database.user\"], # use the same username as the database\n", " 0,\n", ")\n", "sgc.LabMember.insert_from_name(name)\n", @@ -223,9 +223,23 @@ " ],\n", " skip_duplicates=True,\n", ")\n", + "\n", + "# Make a lab team if doesn't already exist, otherwise insert yourself into team\n", + "team_name = \"My Team\"\n", + "if not sgc.LabTeam() & {\"team_name\": team_name}:\n", + " sgc.LabTeam().create_new_team(\n", + " team_name=team_name, # Should be unique\n", + " team_members=[name],\n", + " team_description=\"test\", # Optional\n", + " )\n", + "else:\n", + " sgc.LabTeam.LabTeamMember().insert1(\n", + " {\"team_name\": team_name, \"lab_member_name\": name}, skip_duplicates=True\n", + " )\n", + "\n", "sgc.LabMember.LabMemberInfo() & {\n", " \"team_name\": \"My Team\",\n", - " \"lab_member_name\": \"Firstname Lastname\",\n", + " \"lab_member_name\": \"Firstname_spikesv0 Lastname_spikesv0\",\n", "}" ] }, @@ -2340,6 +2354,7 @@ "source": [ "# Parameters used for waveform extraction from the recording\n", "waveform_params_name = \"default_whitened\"\n", + "sgs.WaveformParameters().insert_default() # insert default parameter sets if not already in database\n", "(\n", " sgs.WaveformParameters() & {\"waveform_params_name\": waveform_params_name}\n", ").fetch(as_dict=True)[0]" @@ -2454,6 +2469,7 @@ "source": [ "# parameters which define what quality metrics are calculated and how\n", "metric_params_name = \"franklab_default3\"\n", + 
"sgs.MetricParameters().insert_default() # insert default parameter sets if not already in database\n", "(sgs.MetricParameters() & {\"metric_params_name\": metric_params_name}).fetch(\n", " \"metric_params\"\n", ")[0]" @@ -2768,6 +2784,7 @@ "source": [ "# We can select our criteria for unit labeling here\n", "auto_curation_params_name = \"default\"\n", + "sgs.AutomaticCurationParameters().insert_default()\n", "(\n", " sgs.AutomaticCurationParameters()\n", " & {\"auto_curation_params_name\": auto_curation_params_name}\n", diff --git a/notebooks/10_Spike_SortingV1.ipynb b/notebooks/10_Spike_SortingV1.ipynb index e07ea0f89..e650f2cbc 100644 --- a/notebooks/10_Spike_SortingV1.ipynb +++ b/notebooks/10_Spike_SortingV1.ipynb @@ -46,10 +46,15 @@ "if os.path.basename(os.getcwd()) == \"notebooks\":\n", " os.chdir(\"..\")\n", "dj.config[\"enable_python_native_blobs\"] = True\n", - "dj.config.load(\"dj_local_conf.json\") # load config for database connection info\n", - "\n", - "%load_ext autoreload\n", - "%autoreload 2" + "dj.config.load(\"dj_local_conf.json\") # load config for database connection info" + ] + }, + { + "cell_type": "markdown", + "id": "344e01b4", + "metadata": {}, + "source": [ + "## Insert Data and populate pre-requisite tables" ] }, { @@ -62,10 +67,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "16345184-c012-486c-b0b6-c914168f2449", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2024-03-22 09:25:28,835][INFO]: Connecting sambray@lmf-db.cin.ucsf.edu:3306\n", + "[2024-03-22 09:25:28,874][INFO]: Connected sambray@lmf-db.cin.ucsf.edu:3306\n" + ] + } + ], "source": [ "import spyglass.common as sgc\n", "import spyglass.spikesorting.v1 as sgs\n", @@ -77,38 +91,173 @@ "id": "48d2c06a-feb6-438c-94b3-4028127e2101", "metadata": {}, "source": [ - "We will be using `mediumnwb20230802.nwb` as our example. As usual, first insert the NWB file into `Session` (can skip if you have already done so)." + "We will be using `minirec20230622.nwb` as our example. As usual, first insert the NWB file into `Session` (can skip if you have already done so)." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "a3a0ecdf-8dad-41d5-9ee2-fa60f80c746d", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/sambray/Documents/spyglass/src/spyglass/data_import/insert_sessions.py:58: UserWarning: Cannot insert data from minirec20230622.nwb: minirec20230622_.nwb is already in Nwbfile table.\n", + " warnings.warn(\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " Table for holding experimental sessions.\n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

nwb_file_name

\n", + " name of the NWB file\n", + "
\n", + "

subject_id

\n", + " \n", + "
\n", + "

institution_name

\n", + " \n", + "
\n", + "

lab_name

\n", + " \n", + "
\n", + "

session_id

\n", + " \n", + "
\n", + "

session_description

\n", + " \n", + "
\n", + "

session_start_time

\n", + " \n", + "
\n", + "

timestamps_reference_time

\n", + " \n", + "
\n", + "

experiment_description

\n", + " \n", + "
minirec20230622_.nwb54321UCSFLoren Frank Lab12345test yaml insertion2023-06-22 15:59:581970-01-01 00:00:00Test Conversion
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*nwb_file_name subject_id institution_na lab_name session_id session_descri session_start_ timestamps_ref experiment_des\n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", + "minirec2023062 54321 UCSF Loren Frank La 12345 test yaml inse 2023-06-22 15: 1970-01-01 00: Test Conversio\n", + " (Total: 1)" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "nwb_file_name = \"minirec20230622.nwb\"\n", + "nwb_file_name2 = \"minirec20230622_.nwb\"\n", + "sgi.insert_sessions(nwb_file_name)\n", + "sgc.Session() & {\"nwb_file_name\": nwb_file_name2}" + ] + }, + { + "cell_type": "markdown", + "id": "49ea5ac0", + "metadata": {}, "source": [ - "nwb_file_name = \"mediumnwb20230802.nwb\"\n", - "nwb_file_name2 = \"mediumnwb20230802_.nwb\"" + "All spikesorting results are linked to a team name from the `LabTeam` table. If you haven't already inserted a team for your project do so here. " ] }, { "cell_type": "code", "execution_count": null, - "id": "dfa1b73e-da6e-470f-aff6-0d45c3ddff5c", + "id": "8d659323", "metadata": {}, "outputs": [], "source": [ - "sgi.insert_sessions(nwb_file_name)" + "# Make a lab team if doesn't already exist, otherwise insert yourself into team\n", + "team_name = \"My Team\"\n", + "if not sgc.LabTeam() & {\"team_name\": team_name}:\n", + " sgc.LabTeam().create_new_team(\n", + " team_name=team_name, # Should be unique\n", + " team_members=[],\n", + " team_description=\"test\", # Optional\n", + " )" ] }, { - "cell_type": "code", - "execution_count": null, - "id": "e11b5f5d-e9e0-4949-9fc1-4a34cc975fb1", + "cell_type": "markdown", + "id": "4e390a71", "metadata": {}, - "outputs": [], "source": [ - "sgc.Session()" + "## Define sort groups and extract recordings" ] }, { @@ -116,7 +265,10 @@ "id": "5f3dfe2d-4645-44f9-b169-479292215afe", "metadata": {}, "source": [ - "Next, we will define the groups of electrodes to sort together. These are stored in `SortGroup` table. " + "Each NWB file will have multiple electrodes we can use for spike sorting. We\n", + "commonly use multiple electrodes in a `SortGroup` selected by what tetrode or\n", + "shank of a probe they were on. Electrodes in the same sort group will then be\n", + "sorted together." 
] }, { @@ -144,20 +296,14 @@ "metadata": {}, "outputs": [], "source": [ + "# define and insert a key for each sort group and interval you want to sort\n", "key = {\n", " \"nwb_file_name\": nwb_file_name2,\n", " \"sort_group_id\": 0,\n", " \"preproc_param_name\": \"default\",\n", - "}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e6d0dcb0-acfe-4adb-8da6-a5570b97f48a", - "metadata": {}, - "outputs": [], - "source": [ + " \"interval_list_name\": \"01_s1\",\n", + " \"team_name\": \"My Team\",\n", + "}\n", "sgs.SpikeSortingRecordingSelection.insert_selection(key)" ] }, @@ -178,44 +324,32 @@ "source": [ "# Assuming 'key' is a dictionary with fields that you want to include in 'ssr_key'\n", "ssr_key = {\n", - " \"recording_id\": (sgs.SpikeSortingRecordingSelection() & key).fetch1('recording_id'),\n", - "} | key" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1bd48e28-d40e-4cf3-a89e-58d4c3cb08e8", - "metadata": {}, - "outputs": [], - "source": [ - "#sgs.SpikeSortingRecording.populate()\n", - "ssr_pk = (sgs.SpikeSortingRecordingSelection & key).proj()\n", - "\n", + " \"recording_id\": (sgs.SpikeSortingRecordingSelection() & key).fetch1(\n", + " \"recording_id\"\n", + " ),\n", + "} | key\n", "\n", - "sgs.SpikeSortingRecording.populate(ssr_pk)" + "ssr_pk = (sgs.SpikeSortingRecordingSelection & key).proj()\n", + "sgs.SpikeSortingRecording.populate(ssr_pk)\n", + "sgs.SpikeSortingRecording() & ssr_key" ] }, { "cell_type": "code", "execution_count": null, - "id": "9a9bf343-5b5e-457c-8bf4-f12b194a5489", + "id": "1c6c7ea3-9538-4fa9-890b-ee16cc18af31", "metadata": {}, "outputs": [], "source": [ - "sgs.SpikeSortingRecording() & ssr_key" + "key = (sgs.SpikeSortingRecordingSelection & key).fetch1()" ] }, { - "cell_type": "code", - "execution_count": null, - "id": "1c6c7ea3-9538-4fa9-890b-ee16cc18af31", + "cell_type": "markdown", + "id": "348334fa", "metadata": {}, - "outputs": [], "source": [ - "key = (\n", - " sgs.SpikeSortingRecordingSelection & key\n", - ").fetch1()" + "## Artifact Detection" ] }, { @@ -235,16 +369,7 @@ "source": [ "sgs.ArtifactDetectionSelection.insert_selection(\n", " {\"recording_id\": key[\"recording_id\"], \"artifact_param_name\": \"default\"}\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bd89c36c-c05b-4b4a-85d9-7679fed173d1", - "metadata": {}, - "outputs": [], - "source": [ + ")\n", "sgs.ArtifactDetection.populate()" ] }, @@ -266,6 +391,14 @@ "The output of `ArtifactDetection` is actually stored in `IntervalList` because it is another type of interval. The UUID however can be found in both. 
" ] }, + { + "cell_type": "markdown", + "id": "0ee9ca19", + "metadata": {}, + "source": [ + "## Run Spike Sorting" + ] + }, { "cell_type": "markdown", "id": "65ae0f70-2d8d-40d4-86c9-2ab206b28ca9", @@ -291,7 +424,7 @@ "metadata": {}, "outputs": [], "source": [ - "sorter = 'mountainsort4'\n", + "sorter = \"mountainsort4\"\n", "\n", "common_key = {\n", " \"recording_id\": key[\"recording_id\"],\n", @@ -305,7 +438,7 @@ " ),\n", "}\n", "\n", - "if sorter == 'mountainsort4':\n", + "if sorter == \"mountainsort4\":\n", " key = {\n", " **common_key,\n", " \"sorter_param_name\": \"franklab_tetrode_hippocampus_30KHz\",\n", @@ -324,16 +457,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgs.SpikeSortingSelection.insert_selection(key)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2caed63b-6094-4a59-b8d9-6a0f186b2d3f", - "metadata": {}, - "outputs": [], - "source": [ + "sgs.SpikeSortingSelection.insert_selection(key)\n", "sgs.SpikeSortingSelection() & key" ] }, @@ -362,7 +486,7 @@ "id": "f3d1e621", "metadata": {}, "source": [ - "The spike sorting results (spike times of detected units) are saved in an NWB file. We can access this in two ways. First, we can access it via the `fetch_nwb` method, which allows us to directly access the spike times saved in the `units` table of the NWB file. Second, we can access it as a `spikeinterface.NWBSorting` object. Ths allows us to take advantage of the rich APIs of `spikeinterface` to further analyze the sorting. " + "The spike sorting results (spike times of detected units) are saved in an NWB file. We can access this in two ways. First, we can access it via the `fetch_nwb` method, which allows us to directly access the spike times saved in the `units` table of the NWB file. Second, we can access it as a `spikeinterface.NWBSorting` object. This allows us to take advantage of the rich APIs of `spikeinterface` to further analyze the sorting. " ] }, { @@ -386,29 +510,31 @@ }, { "cell_type": "markdown", - "id": "ea8fcaa0-9dd7-4870-9f5b-be039e3579cc", + "id": "55d6c183", "metadata": {}, "source": [ - "Next step is to curate the results of spike sorting. This is often necessary because spike sorting algorithms are not perfect; they often return clusters that are clearly not biological in origin, and sometimes oversplit clusters that should have been merged. We have two main ways of curating spike sorting: by computing quality metrics followed by thresholding, and manually applying curation labels. To do either, we first insert the spike sorting to `CurationV1` using `insert_curation` method.\n" + "## Automatic Curation" ] }, { - "cell_type": "code", - "execution_count": null, - "id": "6245eec9-3fba-4071-b58b-eec6d9345532", + "cell_type": "markdown", + "id": "ea8fcaa0-9dd7-4870-9f5b-be039e3579cc", "metadata": {}, - "outputs": [], "source": [ - "sgs.SpikeSortingRecording & key" + "Next step is to curate the results of spike sorting. 
This is often necessary because spike sorting algorithms are not perfect;\n", + "they often return clusters that are clearly not biological in origin, and sometimes oversplit clusters that should have been merged.\n", + "We have two main ways of curating spike sorting: by computing quality metrics followed by thresholding, and manually applying curation labels.\n", + "To do either, we first insert the spike sorting to `CurationV1` using `insert_curation` method.\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "0589a3f2-4977-407f-b49d-4ae3f882ae21", + "id": "6245eec9-3fba-4071-b58b-eec6d9345532", "metadata": {}, "outputs": [], "source": [ + "sgs.SpikeSortingRecording & key\n", "sgs.CurationV1.insert_curation(\n", " sorting_id=(\n", " sgs.SpikeSortingSelection & {\"recording_id\": key[\"recording_id\"]}\n", @@ -460,17 +586,8 @@ "metadata": {}, "outputs": [], "source": [ - "sgs.MetricCurationSelection.insert_selection(key)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d73244b3-f754-4701-be52-ea261eb4185c", - "metadata": {}, - "outputs": [], - "source": [ - "sgs.MetricCurationSelection()" + "sgs.MetricCurationSelection.insert_selection(key)\n", + "sgs.MetricCurationSelection() & key" ] }, { @@ -480,17 +597,8 @@ "metadata": {}, "outputs": [], "source": [ - "sgs.MetricCuration.populate()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "eda6577c-3ed2-480a-b6ed-107d7c479084", - "metadata": {}, - "outputs": [], - "source": [ - "sgs.MetricCuration()" + "sgs.MetricCuration.populate()\n", + "sgs.MetricCuration() & key" ] }, { @@ -512,46 +620,10 @@ " \"metric_curation_id\": (\n", " sgs.MetricCurationSelection & {\"sorting_id\": key[\"sorting_id\"]}\n", " ).fetch1(\"metric_curation_id\")\n", - "}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "45f3bc4a-2842-4802-ad0f-4f333dda171e", - "metadata": {}, - "outputs": [], - "source": [ - "labels = sgs.MetricCuration.get_labels(key)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "df84263f-db5a-44b7-8309-4d63d10fd883", - "metadata": {}, - "outputs": [], - "source": [ - "merge_groups = sgs.MetricCuration.get_merge_groups(key)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "895c85a5-5b4f-44de-a003-c942ba231c22", - "metadata": {}, - "outputs": [], - "source": [ - "metrics = sgs.MetricCuration.get_metrics(key)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "863f6e17-40a6-4b8d-82b5-d14a059c5c77", - "metadata": {}, - "outputs": [], - "source": [ + "}\n", + "labels = sgs.MetricCuration.get_labels(key)\n", + "merge_groups = sgs.MetricCuration.get_merge_groups(key)\n", + "metrics = sgs.MetricCuration.get_metrics(key)\n", "sgs.CurationV1.insert_curation(\n", " sorting_id=(\n", " sgs.MetricCurationSelection\n", @@ -575,12 +647,23 @@ "sgs.CurationV1()" ] }, + { + "cell_type": "markdown", + "id": "a627274b", + "metadata": {}, + "source": [ + "## Manual Curation" + ] + }, { "cell_type": "markdown", "id": "cf8708a4-0a55-4309-b3c4-dbf47d61ad31", "metadata": {}, "source": [ - "next we will do manual curation. this is done with figurl. to incorporate info from other stages of processing (e.g. metrics) we have to store that with kachery cloud and get curation uri referring to it. it can be done with `generate_curation_uri`.\n" + "Next we will do manual curation. this is done with figurl. to incorporate info from other stages of processing (e.g. 
metrics) we have to store that with kachery cloud and get curation uri referring to it. it can be done with `generate_curation_uri`.\n", + "\n", + "_Note_: This step is dependent on setting up a kachery sharing system as described in [02_Data_Sync.ipynb](02_Data_Sync.ipynb)\n", + "and will likely not work correctly on the spyglass-demo server.\n" ] }, { @@ -598,16 +681,7 @@ " ).fetch1(\"sorting_id\"),\n", " \"curation_id\": 1,\n", " }\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1987fab4-9d4f-47dc-9546-90086fba7919", - "metadata": {}, - "outputs": [], - "source": [ + ")\n", "key = {\n", " \"sorting_id\": (\n", " sgs.MetricCurationSelection\n", @@ -616,16 +690,7 @@ " \"curation_id\": 1,\n", " \"curation_uri\": curation_uri,\n", " \"metrics_figurl\": list(metrics.keys()),\n", - "}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18c05728-9a87-4624-bd3b-82038ef68bd8", - "metadata": {}, - "outputs": [], - "source": [ + "}\n", "sgs.FigURLCurationSelection()" ] }, @@ -636,16 +701,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgs.FigURLCurationSelection.insert_selection(key)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cd9667da-794f-4196-9e3d-527d8932d1e9", - "metadata": {}, - "outputs": [], - "source": [ + "sgs.FigURLCurationSelection.insert_selection(key)\n", "sgs.FigURLCurationSelection()" ] }, @@ -656,16 +712,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgs.FigURLCuration.populate()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7bf6eb76-4883-4436-a320-7ade5c3af910", - "metadata": {}, - "outputs": [], - "source": [ + "sgs.FigURLCuration.populate()\n", "sgs.FigURLCuration()" ] }, @@ -693,16 +740,7 @@ " \"curation_id\": 1,\n", " \"curation_uri\": gh_curation_uri,\n", " \"metrics_figurl\": [],\n", - "}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "449cdcdc-dcff-4aa6-a541-d674ccfbb0b5", - "metadata": {}, - "outputs": [], - "source": [ + "}\n", "sgs.FigURLCurationSelection.insert_selection(key)" ] }, @@ -713,16 +751,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgs.FigURLCuration.populate()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "37847473-1c46-4991-93a0-e315568e675a", - "metadata": {}, - "outputs": [], - "source": [ + "sgs.FigURLCuration.populate()\n", "sgs.FigURLCuration()" ] }, @@ -741,26 +770,8 @@ "metadata": {}, "outputs": [], "source": [ - "labels = sgs.FigURLCuration.get_labels(gh_curation_uri)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f467a487-100e-4217-914a-60e852805faf", - "metadata": {}, - "outputs": [], - "source": [ - "merge_groups = sgs.FigURLCuration.get_merge_groups(gh_curation_uri)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5873ad89-64cb-427a-a183-f15c2c42907a", - "metadata": {}, - "outputs": [], - "source": [ + "labels = sgs.FigURLCuration.get_labels(gh_curation_uri)\n", + "merge_groups = sgs.FigURLCuration.get_merge_groups(gh_curation_uri)\n", "sgs.CurationV1.insert_curation(\n", " sorting_id=key[\"sorting_id\"],\n", " parent_curation_id=1,\n", @@ -796,29 +807,11 @@ "metadata": {}, "outputs": [], "source": [ - "from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5047f866-7435-4dea-9ed8-a9b2d8365682", - "metadata": {}, - "outputs": [], - "source": [ + "from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput\n", + "\n", 
"SpikeSortingOutput()" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "d2702410-01e1-4af0-a987-891c42c6c099", - "metadata": {}, - "outputs": [], - "source": [ - "key" - ] - }, { "cell_type": "code", "execution_count": null, @@ -826,53 +819,26 @@ "metadata": {}, "outputs": [], "source": [ - "SpikeSortingOutput.insert([key], part_name=\"CurationV1\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "184c3401-8df3-46f0-9dd0-c9fa98395c34", - "metadata": {}, - "outputs": [], - "source": [ + "SpikeSortingOutput.insert([key], part_name=\"CurationV1\")\n", "SpikeSortingOutput.merge_view()" ] }, { "cell_type": "code", "execution_count": null, - "id": "e2b083a5-b700-438a-8a06-2e2eb041072d", + "id": "184c3401-8df3-46f0-9dd0-c9fa98395c34", "metadata": {}, "outputs": [], "source": [ "SpikeSortingOutput.CurationV1()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10b8afa1-d4a6-4ac1-959b-f4e84e582f2e", - "metadata": {}, - "outputs": [], - "source": [ - "SpikeSortingOutput.CuratedSpikeSorting()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e9eebf75-6fef-43c4-80b8-12e59e5d743c", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { "kernelspec": { - "display_name": "spyglass-2024-02-07", + "display_name": "spyglass", "language": "python", - "name": "spyglass-ds" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/notebooks/32_Ripple_Detection.ipynb b/notebooks/32_Ripple_Detection.ipynb index b9a37e279..bdd5c56a5 100644 --- a/notebooks/32_Ripple_Detection.ipynb +++ b/notebooks/32_Ripple_Detection.ipynb @@ -239,15 +239,29 @@ " pd.DataFrame(\n", " (\n", " sgc.Electrode\n", - " & {\n", - " \"nwb_file_name\": nwb_file_name,\n", - " }\n", + " & {\"nwb_file_name\": nwb_file_name, \"bad_channel\": \"False\"}\n", " )\n", " * (sgc.BrainRegion & {\"region_name\": \"hippocampus\"})\n", " )\n", - " .loc[:, [\"nwb_file_name\", \"electrode_id\", \"region_name\"]]\n", + " .loc[\n", + " :,\n", + " [\n", + " \"nwb_file_name\",\n", + " \"electrode_id\",\n", + " \"region_name\",\n", + " \"electrode_group_name\",\n", + " ],\n", + " ]\n", " .sort_values(by=\"electrode_id\")\n", ")\n", + "# for the purpose of the demo, we will only use one electrode per electrode group\n", + "electrodes_df = pd.DataFrame(\n", + " [\n", + " electrodes_df[electrodes_df.electrode_group_name == str(i)].iloc[0]\n", + " for i in np.unique(electrodes_df.electrode_group_name.values)\n", + " ]\n", + ")\n", + "\n", "# create lfp_electrode_group\n", "lfp_eg_key = {\n", " \"nwb_file_name\": nwb_file_name,\n", @@ -3038,7 +3052,25 @@ } ], "source": [ - "sgr.RippleParameters().insert_default" + "sgr.RippleParameters().insert_default()\n", + "sgr.RippleParameters.insert1(\n", + " {\n", + " \"ripple_param_name\": \"default_trodes\",\n", + " \"ripple_param_dict\": {\n", + " \"speed_name\": \"speed\", # name of the speed field in the position data\n", + " \"ripple_detection_algorithm\": \"Kay_ripple_detector\",\n", + " \"ripple_detection_params\": {\n", + " \"speed_threshold\": 4.0,\n", + " \"minimum_duration\": 0.015,\n", + " \"zscore_threshold\": 2.0,\n", + " \"smoothing_sigma\": 0.004,\n", + " \"close_ripple_threshold\": 0.0,\n", + " },\n", + " },\n", + " },\n", + " skip_duplicates=True,\n", + ")\n", + "sgr.RippleParameters()" ] }, { @@ -3220,6 +3252,25 @@ } ], "source": [ + "# insert the position parameter set\n", + "sgp.TrodesPosParams().insert1(\n", + " {\n", + " \"trodes_pos_params_name\": \"single_led\",\n", + " \"params\": 
{\n", + " \"max_separation\": 10000.0,\n", + " \"max_speed\": 300.0,\n", + " \"position_smoothing_duration\": 0.125,\n", + " \"speed_smoothing_std_dev\": 0.1,\n", + " \"orient_smoothing_std_dev\": 0.001,\n", + " \"led1_is_front\": 1,\n", + " \"is_upsampled\": 0,\n", + " \"upsampling_sampling_rate\": None,\n", + " \"upsampling_interpolation_method\": \"linear\",\n", + " },\n", + " },\n", + " skip_duplicates=True,\n", + ")\n", + "# populate the position if not done already\n", "pos_key = {\n", " \"nwb_file_name\": nwb_file_name,\n", " \"trodes_pos_params_name\": \"single_led\",\n", @@ -3842,7 +3893,7 @@ "source": [ "## Up Next\n", "\n", - "Next, we'll [extract mark indicator](./31_Extract_Mark_Indicators.ipynb).\n" + "We will learn how to [extract spike waveform features](./40_Extracting_Clusterless_Waveform_Features.ipynb) to decode neural data.\n" ] } ], diff --git a/notebooks/py_scripts/10_Spike_SortingV0.py b/notebooks/py_scripts/10_Spike_SortingV0.py index 71a70b626..2675799db 100644 --- a/notebooks/py_scripts/10_Spike_SortingV0.py +++ b/notebooks/py_scripts/10_Spike_SortingV0.py @@ -85,11 +85,12 @@ # If you haven't already done so, add yourself to `LabTeam` # +# + # Full name, Google email address, DataJoint username, admin name, email, dj_user, admin = ( - "Firstname Lastname", - "example@gmail.com", - "user", + "Firstname_spikesv0 Lastname_spikesv0", + "example_spikesv0@gmail.com", + dj.config["database.user"], # use the same username as the database 0, ) sgc.LabMember.insert_from_name(name) @@ -102,10 +103,25 @@ ], skip_duplicates=True, ) + +# Make a lab team if doesn't already exist, otherwise insert yourself into team +team_name = "My Team" +if not sgc.LabTeam() & {"team_name": team_name}: + sgc.LabTeam().create_new_team( + team_name=team_name, # Should be unique + team_members=[name], + team_description="test", # Optional + ) +else: + sgc.LabTeam.LabTeamMember().insert1( + {"team_name": team_name, "lab_member_name": name}, skip_duplicates=True + ) + sgc.LabMember.LabMemberInfo() & { "team_name": "My Team", - "lab_member_name": "Firstname Lastname", + "lab_member_name": "Firstname_spikesv0 Lastname_spikesv0", } +# - # We can try `fetch` to confirm. 
# @@ -479,6 +495,7 @@ def print_interval_duration(interval_list: np.ndarray): # Parameters used for waveform extraction from the recording waveform_params_name = "default_whitened" +sgs.WaveformParameters().insert_default() # insert default parameter sets if not already in database ( sgs.WaveformParameters() & {"waveform_params_name": waveform_params_name} ).fetch(as_dict=True)[0] @@ -497,6 +514,7 @@ def print_interval_duration(interval_list: np.ndarray): # parameters which define what quality metrics are calculated and how metric_params_name = "franklab_default3" +sgs.MetricParameters().insert_default() # insert default parameter sets if not already in database (sgs.MetricParameters() & {"metric_params_name": metric_params_name}).fetch( "metric_params" )[0] @@ -518,6 +536,7 @@ def print_interval_duration(interval_list: np.ndarray): # We can select our criteria for unit labeling here auto_curation_params_name = "default" +sgs.AutomaticCurationParameters().insert_default() ( sgs.AutomaticCurationParameters() & {"auto_curation_params_name": auto_curation_params_name} diff --git a/notebooks/py_scripts/10_Spike_SortingV1.py b/notebooks/py_scripts/10_Spike_SortingV1.py index 49f9d93cf..96ee444ff 100644 --- a/notebooks/py_scripts/10_Spike_SortingV1.py +++ b/notebooks/py_scripts/10_Spike_SortingV1.py @@ -5,15 +5,23 @@ # extension: .py # format_name: light # format_version: '1.5' -# jupytext_version: 1.16.1 +# jupytext_version: 1.15.2 # kernelspec: -# display_name: spyglass-2024-02-07 +# display_name: spyglass # language: python -# name: spyglass-ds +# name: python3 # --- -# Connect to db. See instructions in [Setup](./00_Setup.ipynb). +# # Spike Sorting: pipeline version 1 + +# This is a tutorial for Spyglass spike sorting pipeline version 1 (V1). This pipeline coexists with [version 0](./10_Spike_SortingV0.ipynb) but differs in that: +# - it stores more of the intermediate results (e.g. filtered and referenced recording) in the NWB format +# - it has more streamlined curation pipelines +# - it uses UUIDs as the primary key for important tables (e.g. `SpikeSorting`) to reduce the number of keys that make up the composite primary key # +# The output of both versions of the pipeline are saved in a [merge table](./03_Merge_Tables.ipynb) called `SpikeSortingOutput`. + +# To start, connect to the database. See instructions in [Setup](./00_Setup.ipynb). # + import os @@ -25,46 +33,59 @@ os.chdir("..") dj.config["enable_python_native_blobs"] = True dj.config.load("dj_local_conf.json") # load config for database connection info - -# %load_ext autoreload -# %autoreload 2 # - -# import -# +# ## Insert Data and populate pre-requisite tables + +# First, import the pipeline and other necessary modules. import spyglass.common as sgc import spyglass.spikesorting.v1 as sgs import spyglass.data_import as sgi -# insert LabMember and Session -# - -nwb_file_name = "mediumnwb20230802.nwb" -nwb_file_name2 = "mediumnwb20230802_.nwb" +# We will be using `minirec20230622.nwb` as our example. As usual, first insert the NWB file into `Session` (can skip if you have already done so). +nwb_file_name = "minirec20230622.nwb" +nwb_file_name2 = "minirec20230622_.nwb" sgi.insert_sessions(nwb_file_name) +sgc.Session() & {"nwb_file_name": nwb_file_name2} -sgc.Session() +# All spikesorting results are linked to a team name from the `LabTeam` table. If you haven't already inserted a team for your project do so here. 
-# insert SortGroup -# +# Make a lab team if doesn't already exist, otherwise insert yourself into team +team_name = "My Team" +if not sgc.LabTeam() & {"team_name": team_name}: + sgc.LabTeam().create_new_team( + team_name=team_name, # Should be unique + team_members=[], + team_description="test", # Optional + ) + +# ## Define sort groups and extract recordings + +# Each NWB file will have multiple electrodes we can use for spike sorting. We +# commonly use multiple electrodes in a `SortGroup` selected by what tetrode or +# shank of a probe they were on. Electrodes in the same sort group will then be +# sorted together. sgs.SortGroup.set_group_by_shank(nwb_file_name=nwb_file_name2) -# insert SpikeSortingRecordingSelection. use `insert_selection` method. this automatically generates a unique recording id +# The next step is to filter and reference the recording so that we isolate the spike band data. This is done by combining the data with the parameters in `SpikeSortingRecordingSelection`. For inserting into this table, use `insert_selection` method. This automatically generates a UUID for a recording. # +# define and insert a key for each sort group and interval you want to sort key = { "nwb_file_name": nwb_file_name2, "sort_group_id": 0, "preproc_param_name": "default", + "interval_list_name": "01_s1", + "team_name": "My Team", } - -sgs.SpikeSortingRecordingSelection() & key - sgs.SpikeSortingRecordingSelection.insert_selection(key) +# Next we will call `populate` method of `SpikeSortingRecording`. + +# + # Assuming 'key' is a dictionary with fields that you want to include in 'ssr_key' ssr_key = { "recording_id": (sgs.SpikeSortingRecordingSelection() & key).fetch1( @@ -72,47 +93,33 @@ ), } | key -# preprocess recording (filtering and referencing) -# - -# + -# sgs.SpikeSortingRecording.populate() ssr_pk = (sgs.SpikeSortingRecordingSelection & key).proj() - - sgs.SpikeSortingRecording.populate(ssr_pk) -# - - sgs.SpikeSortingRecording() & ssr_key +# - key = (sgs.SpikeSortingRecordingSelection & key).fetch1() -# insert ArtifactDetectionSelection -# +# ## Artifact Detection + +# Sometimes the recording may contain artifacts that can confound spike sorting. For example, we often have artifacts when the animal licks the reward well for milk during behavior. These appear as sharp transients across all channels, and sometimes they are not adequately removed by filtering and referencing. We will identify the periods during which this type of artifact appears and set them to zero so that they won't interfere with spike sorting. sgs.ArtifactDetectionSelection.insert_selection( {"recording_id": key["recording_id"], "artifact_param_name": "default"} ) - -# detect artifact; note the output is stored in IntervalList -# - sgs.ArtifactDetection.populate() sgs.ArtifactDetection() -# insert SpikeSortingSelection. again use `insert_selection` method. -# -# We tested mountainsort4, mountainsort5, kilosort2_5, kilosort3, and ironclust. -# when using mountainsort5, pip install 'mountainsort5' -# when using Kilosorts and ironclust -- make sure to pip install 'cuda-python' and 'spython' -# For sorting with Kilosort, make sure to use a machine with GPU and put the whole probe not a sliced individual shank. -# +# The output of `ArtifactDetection` is actually stored in `IntervalList` because it is another type of interval. The UUID however can be found in both. + +# ## Run Spike Sorting -# Install mountainsort 4 if you haven't done it. 
+# Now that we have prepared the recording, we will pair this with a spike sorting algorithm and associated parameters. This will be inserted to `SpikeSortingSelection`, again via `insert_selection` method. -# #!pip install pybind11 -# !pip install mountainsort4 +# The spike sorting pipeline is powered by `spikeinterface`, a community-developed Python package that enables one to easily apply multiple spike sorters to a single recording. Some spike sorters have special requirements, such as GPU. Others need to be installed separately from spyglass. In the Frank lab, we have been using `mountainsort4`, though the pipeline have been tested with `mountainsort5`, `kilosort2_5`, `kilosort3`, and `ironclust` as well. +# +# When using `mountainsort5`, make sure to run `pip install mountainsort5`. `kilosort2_5`, `kilosort3`, and `ironclust` are MATLAB-based, but we can run these without having to install MATLAB thanks to `spikeinterface`. It does require downloading additional files (as singularity containers) so make sure to do `pip install spython`. These sorters also require GPU access, so also do ` pip install cuda-python` (and make sure your computer does have a GPU). # + sorter = "mountainsort4" @@ -129,13 +136,11 @@ ), } - if sorter == "mountainsort4": key = { **common_key, "sorter_param_name": "franklab_tetrode_hippocampus_30KHz", } - else: key = { **common_key, @@ -144,11 +149,9 @@ # - sgs.SpikeSortingSelection.insert_selection(key) - sgs.SpikeSortingSelection() & key -# run spike sorting -# +# Once `SpikeSortingSelection` is populated, let's run `SpikeSorting.populate`. # + sss_pk = (sgs.SpikeSortingSelection & key).proj() @@ -156,11 +159,22 @@ sgs.SpikeSorting.populate(sss_pk) # - -# we have two main ways of curating spike sorting: by computing quality metrics and applying threshold; and manually applying curation labels. to do so, we first insert CurationV1. use `insert_curation` method. +# The spike sorting results (spike times of detected units) are saved in an NWB file. We can access this in two ways. First, we can access it via the `fetch_nwb` method, which allows us to directly access the spike times saved in the `units` table of the NWB file. Second, we can access it as a `spikeinterface.NWBSorting` object. This allows us to take advantage of the rich APIs of `spikeinterface` to further analyze the sorting. + +sorting_nwb = (sgs.SpikeSorting & key).fetch_nwb() +sorting_si = sgs.SpikeSorting.get_sorting(key) + +# Note that the spike times of `fetch_nwb` is in units of seconds aligned with the timestamps of the recording. The spike times of the `spikeinterface.NWBSorting` object is in units of samples (as is generally true for sorting objects in `spikeinterface`). + +# ## Automatic Curation + +# Next step is to curate the results of spike sorting. This is often necessary because spike sorting algorithms are not perfect; +# they often return clusters that are clearly not biological in origin, and sometimes oversplit clusters that should have been merged. +# We have two main ways of curating spike sorting: by computing quality metrics followed by thresholding, and manually applying curation labels. +# To do either, we first insert the spike sorting to `CurationV1` using `insert_curation` method. 
# sgs.SpikeSortingRecording & key - sgs.CurationV1.insert_curation( sorting_id=( sgs.SpikeSortingSelection & {"recording_id": key["recording_id"]} @@ -170,8 +184,7 @@ sgs.CurationV1() -# we will first do an automatic curation based on quality metrics -# +# We will first do an automatic curation based on quality metrics. Under the hood, this part again makes use of `spikeinterface`. Some of the quality metrics that we often compute are the nearest neighbor isolation and noise overlap metrics, as well as SNR and ISI violation rate. For computing some of these metrics, the waveforms must be extracted and projected onto a feature space. Thus here we set the parameters for waveform extraction as well as how to curate the units based on these metrics (e.g. if `nn_noise_overlap` is greater than 0.1, mark as `noise`). key = { "sorting_id": ( @@ -184,12 +197,10 @@ } sgs.MetricCurationSelection.insert_selection(key) - -sgs.MetricCurationSelection() +sgs.MetricCurationSelection() & key sgs.MetricCuration.populate() - -sgs.MetricCuration() +sgs.MetricCuration() & key # to do another round of curation, fetch the relevant info and insert back into CurationV1 using `insert_curation` # @@ -199,13 +210,9 @@ sgs.MetricCurationSelection & {"sorting_id": key["sorting_id"]} ).fetch1("metric_curation_id") } - labels = sgs.MetricCuration.get_labels(key) - merge_groups = sgs.MetricCuration.get_merge_groups(key) - metrics = sgs.MetricCuration.get_metrics(key) - sgs.CurationV1.insert_curation( sorting_id=( sgs.MetricCurationSelection @@ -220,7 +227,12 @@ sgs.CurationV1() -# next we will do manual curation. this is done with figurl. to incorporate info from other stages of processing (e.g. metrics) we have to store that with kachery cloud and get curation uri referring to it. it can be done with `generate_curation_uri`. +# ## Manual Curation + +# Next we will do manual curation. this is done with figurl. to incorporate info from other stages of processing (e.g. metrics) we have to store that with kachery cloud and get curation uri referring to it. it can be done with `generate_curation_uri`. +# +# _Note_: This step is dependent on setting up a kachery sharing system as described in [02_Data_Sync.ipynb](02_Data_Sync.ipynb) +# and will likely not work correctly on the spyglass-demo server. # curation_uri = sgs.FigURLCurationSelection.generate_curation_uri( @@ -232,7 +244,6 @@ "curation_id": 1, } ) - key = { "sorting_id": ( sgs.MetricCurationSelection @@ -242,15 +253,12 @@ "curation_uri": curation_uri, "metrics_figurl": list(metrics.keys()), } - sgs.FigURLCurationSelection() sgs.FigURLCurationSelection.insert_selection(key) - sgs.FigURLCurationSelection() sgs.FigURLCuration.populate() - sgs.FigURLCuration() # or you can manually specify it if you already have a `curation.json` @@ -267,21 +275,17 @@ "curation_uri": gh_curation_uri, "metrics_figurl": [], } -# - - sgs.FigURLCurationSelection.insert_selection(key) +# - sgs.FigURLCuration.populate() - sgs.FigURLCuration() # once you apply manual curation (curation labels and merge groups) you can store them as nwb by inserting another row in CurationV1. And then you can do more rounds of curation if you want. # labels = sgs.FigURLCuration.get_labels(gh_curation_uri) - merge_groups = sgs.FigURLCuration.get_merge_groups(gh_curation_uri) - sgs.CurationV1.insert_curation( sorting_id=key["sorting_id"], parent_curation_id=1, @@ -296,16 +300,13 @@ # We now insert the curated spike sorting to a `Merge` table for feeding into downstream processing pipelines. 
# +# + from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput SpikeSortingOutput() - -key +# - SpikeSortingOutput.insert([key], part_name="CurationV1") - SpikeSortingOutput.merge_view() SpikeSortingOutput.CurationV1() - -SpikeSortingOutput.CuratedSpikeSorting() diff --git a/notebooks/py_scripts/32_Ripple_Detection.py b/notebooks/py_scripts/32_Ripple_Detection.py index 9bd360ec3..1a85028e4 100644 --- a/notebooks/py_scripts/32_Ripple_Detection.py +++ b/notebooks/py_scripts/32_Ripple_Detection.py @@ -80,15 +80,29 @@ pd.DataFrame( ( sgc.Electrode - & { - "nwb_file_name": nwb_file_name, - } + & {"nwb_file_name": nwb_file_name, "bad_channel": "False"} ) * (sgc.BrainRegion & {"region_name": "hippocampus"}) ) - .loc[:, ["nwb_file_name", "electrode_id", "region_name"]] + .loc[ + :, + [ + "nwb_file_name", + "electrode_id", + "region_name", + "electrode_group_name", + ], + ] .sort_values(by="electrode_id") ) +# for the purpose of the demo, we will only use one electrode per electrode group +electrodes_df = pd.DataFrame( + [ + electrodes_df[electrodes_df.electrode_group_name == str(i)].iloc[0] + for i in np.unique(electrodes_df.electrode_group_name.values) + ] +) + # create lfp_electrode_group lfp_eg_key = { "nwb_file_name": nwb_file_name, @@ -274,7 +288,25 @@ # ## Setting Ripple Parameters # -sgr.RippleParameters().insert_default +sgr.RippleParameters().insert_default() +sgr.RippleParameters.insert1( + { + "ripple_param_name": "default_trodes", + "ripple_param_dict": { + "speed_name": "speed", # name of the speed field in the position data + "ripple_detection_algorithm": "Kay_ripple_detector", + "ripple_detection_params": { + "speed_threshold": 4.0, + "minimum_duration": 0.015, + "zscore_threshold": 2.0, + "smoothing_sigma": 0.004, + "close_ripple_threshold": 0.0, + }, + }, + }, + skip_duplicates=True, +) +sgr.RippleParameters() # Here are the default ripple parameters: # @@ -300,6 +332,25 @@ # set and for a given interval. We can quickly populate this here # +# insert the position parameter set +sgp.TrodesPosParams().insert1( + { + "trodes_pos_params_name": "single_led", + "params": { + "max_separation": 10000.0, + "max_speed": 300.0, + "position_smoothing_duration": 0.125, + "speed_smoothing_std_dev": 0.1, + "orient_smoothing_std_dev": 0.001, + "led1_is_front": 1, + "is_upsampled": 0, + "upsampling_sampling_rate": None, + "upsampling_interpolation_method": "linear", + }, + }, + skip_duplicates=True, +) +# populate the position if not done already pos_key = { "nwb_file_name": nwb_file_name, "trodes_pos_params_name": "single_led", @@ -366,5 +417,5 @@ # ## Up Next # -# Next, we'll [extract mark indicator](./31_Extract_Mark_Indicators.ipynb). +# We will learn how to [extract spike waveform features](./40_Extracting_Clusterless_Waveform_Features.ipynb) to decode neural data. 
# From 70e4914407d8b5eb1e14b9b2715386ed9bb74ff0 Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Tue, 26 Mar 2024 10:58:47 -0700 Subject: [PATCH 08/60] Update call to get_template_extremum (#893) * update call to get_template_extremum * update changelog * make version check more robust * spelling * update location of get_template)extremum_channel_peak_shift * Remove backcompatability for spikeinterface --- CHANGELOG.md | 7 +++++++ .../spikesorting/v0/spikesorting_curation.py | 19 ++++++++++++------- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b4a6406f4..a51c775b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,9 +2,16 @@ ## [0.5.2] (Unreleased) +### Infrastructure + - Refactor `TableChain` to include `_searched` attribute. #867 - Fix errors in config import #882 +### Pipelines + +-Spikesorting + - Update calls in v0 pipeline for spikeinterface>=0.99 #893 + ## [0.5.1] (March 7, 2024) ### Infrastructure diff --git a/src/spyglass/spikesorting/v0/spikesorting_curation.py b/src/spyglass/spikesorting/v0/spikesorting_curation.py index 9d1bf9190..d960bb796 100644 --- a/src/spyglass/spikesorting/v0/spikesorting_curation.py +++ b/src/spyglass/spikesorting/v0/spikesorting_curation.py @@ -6,10 +6,17 @@ import warnings from pathlib import Path from typing import List +from packaging import version import datajoint as dj import numpy as np import spikeinterface as si + +if version.parse(si.__version__) < version.parse("0.99.1"): + raise ImportError( + "SpikeInterface version must updated. " + + "Please run `pip install spikeinterface==0.99.1` to update." + ) import spikeinterface.preprocessing as sip import spikeinterface.qualitymetrics as sq @@ -617,12 +624,10 @@ def _get_peak_offset( """Computes the shift of the waveform peak from center of window.""" if "peak_sign" in metric_params: del metric_params["peak_sign"] - peak_offset_inds = ( - si.postprocessing.get_template_extremum_channel_peak_shift( - waveform_extractor=waveform_extractor, - peak_sign=peak_sign, - **metric_params, - ) + peak_offset_inds = si.core.get_template_extremum_channel_peak_shift( + waveform_extractor=waveform_extractor, + peak_sign=peak_sign, + **metric_params, ) peak_offset = {key: int(abs(val)) for key, val in peak_offset_inds.items()} return peak_offset @@ -634,7 +639,7 @@ def _get_peak_channel( """Computes the electrode_id of the channel with the extremum peak for each unit.""" if "peak_sign" in metric_params: del metric_params["peak_sign"] - peak_channel_dict = si.postprocessing.get_template_extremum_channel( + peak_channel_dict = si.core.get_template_extremum_channel( waveform_extractor=waveform_extractor, peak_sign=peak_sign, **metric_params, From 448f9f48812d6992042ea62284c6e04f58419575 Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Wed, 27 Mar 2024 10:45:35 -0700 Subject: [PATCH 09/60] Add spyglass version to created analysis nwb files (#897) * Add sg version to created analysis nwb files * update changelog --- CHANGELOG.md | 1 + src/spyglass/common/common_nwbfile.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a51c775b8..da03d58c1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - Refactor `TableChain` to include `_searched` attribute. 
#867 - Fix errors in config import #882 +- Save current spyglass version in analysis nwb files to aid diagnosis #897 ### Pipelines diff --git a/src/spyglass/common/common_nwbfile.py b/src/spyglass/common/common_nwbfile.py index b5eabab97..d37b0bb61 100644 --- a/src/spyglass/common/common_nwbfile.py +++ b/src/spyglass/common/common_nwbfile.py @@ -11,6 +11,7 @@ import spikeinterface as si from hdmf.common import DynamicTable +from spyglass import __version__ as sg_version from spyglass.settings import analysis_dir, raw_dir from spyglass.utils import SpyglassMixin, logger from spyglass.utils.dj_helper_fn import get_child_tables @@ -193,6 +194,8 @@ def create(self, nwb_file_name): if isinstance(nwb_object, pynwb.core.LabelledDict): for module in list(nwb_object.keys()): nwb_object.pop(module) + # add the version of spyglass that created this file + nwbf.source_script = f"spyglass={sg_version}" analysis_file_name = self.__get_new_file_name(nwb_file_name) # write the new file From 309bde566a8bb56218eb0e6be631e6677981a8ad Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Thu, 28 Mar 2024 11:51:45 -0700 Subject: [PATCH 10/60] Change existing source script to spyglass version (#900) --- src/spyglass/common/common_nwbfile.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/spyglass/common/common_nwbfile.py b/src/spyglass/common/common_nwbfile.py index d37b0bb61..cab0ef66f 100644 --- a/src/spyglass/common/common_nwbfile.py +++ b/src/spyglass/common/common_nwbfile.py @@ -8,6 +8,7 @@ import numpy as np import pandas as pd import pynwb +import h5py import spikeinterface as si from hdmf.common import DynamicTable @@ -182,6 +183,7 @@ def create(self, nwb_file_name): The name of the new NWB file. """ nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) + alter_source_script = False with pynwb.NWBHDF5IO( path=nwb_file_abspath, mode="r", load_namespaces=True ) as io: @@ -195,7 +197,10 @@ def create(self, nwb_file_name): for module in list(nwb_object.keys()): nwb_object.pop(module) # add the version of spyglass that created this file - nwbf.source_script = f"spyglass={sg_version}" + if nwbf.source_script is None: + nwbf.source_script = f"spyglass={sg_version}" + else: + alter_source_script = True analysis_file_name = self.__get_new_file_name(nwb_file_name) # write the new file @@ -208,6 +213,8 @@ def create(self, nwb_file_name): path=analysis_file_abs_path, mode="w", manager=io.manager ) as export_io: export_io.export(io, nwbf) + if alter_source_script: + self._alter_spyglass_version(analysis_file_abs_path) # change the permissions to only allow owner to write permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH @@ -215,6 +222,12 @@ def create(self, nwb_file_name): return analysis_file_name + @staticmethod + def _alter_spyglass_version(nwb_file_path): + """Change the source script to the current version of spyglass""" + with h5py.File(nwb_file_path, "a") as f: + f["/general/source_script"][()] = f"spyglass={sg_version}" + @classmethod def __get_new_file_name(cls, nwb_file_name): # each file ends with a random string of 10 digits, so we generate that From 65745f8ca0742c9a30ae549276fb0bdf0cc44f8e Mon Sep 17 00:00:00 2001 From: Eric Denovellis Date: Fri, 29 Mar 2024 10:53:16 -0700 Subject: [PATCH 11/60] Add pynapple support (#898) * Preliminary code * Add retrieval of file names * Add get_nwb_table function * Update docstrings * Update CHANGELOG.md --- CHANGELOG.md | 1 + src/spyglass/utils/dj_helper_fn.py | 66 ++++++++++++++++++++---------- 
src/spyglass/utils/dj_mixin.py | 44 +++++++++++++++++++- 3 files changed, 89 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index da03d58c1..9af02f4e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - Refactor `TableChain` to include `_searched` attribute. #867 - Fix errors in config import #882 - Save current spyglass version in analysis nwb files to aid diagnosis #897 +- Add pynapple support #898 ### Pipelines diff --git a/src/spyglass/utils/dj_helper_fn.py b/src/spyglass/utils/dj_helper_fn.py index 4a0495778..c5fd82276 100644 --- a/src/spyglass/utils/dj_helper_fn.py +++ b/src/spyglass/utils/dj_helper_fn.py @@ -105,6 +105,49 @@ def dj_replace(original_table, new_values, key_column, replace_column): return original_table +def get_nwb_table(query_expression, tbl, attr_name, *attrs, **kwargs): + """Get the NWB file name and path from the given DataJoint query. + + Parameters + ---------- + query_expression : query + A DataJoint query expression (e.g., join, restrict) or a table to call fetch on. + tbl : table + DataJoint table to fetch from. + attr_name : str + Attribute name to fetch from the table. + *attrs : list + Attributes from normal DataJoint fetch call. + **kwargs : dict + Keyword arguments from normal DataJoint fetch call. + + Returns + ------- + nwb_files : list + List of NWB file names. + file_path_fn : function + Function to get the absolute path to the NWB file. + """ + from spyglass.common.common_nwbfile import AnalysisNwbfile, Nwbfile + + kwargs["as_dict"] = True # force return as dictionary + attrs = attrs or query_expression.heading.names # if none, all + + which = "analysis" if "analysis" in attr_name else "nwb" + tbl_map = { # map to file_name_str and file_path_fn + "analysis": ["analysis_file_name", AnalysisNwbfile.get_abs_path], + "nwb": ["nwb_file_name", Nwbfile.get_abs_path], + } + file_name_str, file_path_fn = tbl_map[which] + + # TODO: check that the query_expression restricts tbl - CBroz + nwb_files = ( + query_expression * tbl.proj(nwb2load_filepath=attr_name) + ).fetch(file_name_str) + + return nwb_files, file_path_fn + + def fetch_nwb(query_expression, nwb_master, *attrs, **kwargs): """Get an NWB object from the given DataJoint query. @@ -127,29 +170,10 @@ def fetch_nwb(query_expression, nwb_master, *attrs, **kwargs): nwb_objects : list List of dicts containing fetch results and NWB objects. """ - kwargs["as_dict"] = True # force return as dictionary tbl, attr_name = nwb_master - - if not attrs: - attrs = query_expression.heading.names - - # get the list of analysis or nwb files - file_name_str = ( - "analysis_file_name" if "analysis" in nwb_master[1] else "nwb_file_name" - ) - # TODO: avoid this import? 
- from ..common.common_nwbfile import AnalysisNwbfile, Nwbfile - - file_path_fn = ( - AnalysisNwbfile.get_abs_path - if "analysis" in nwb_master[1] - else Nwbfile.get_abs_path + nwb_files, file_path_fn = get_nwb_table( + query_expression, tbl, attr_name, *attrs, **kwargs ) - - # TODO: check that the query_expression restricts tbl - CBroz - nwb_files = ( - query_expression * tbl.proj(nwb2load_filepath=attr_name) - ).fetch(file_name_str) for file_name in nwb_files: file_path = file_path_fn(file_name) if not os.path.exists(file_path): diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index 29978ae88..082116bf6 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -13,10 +13,15 @@ from spyglass.utils.database_settings import SHARED_MODULES from spyglass.utils.dj_chains import TableChain, TableChains -from spyglass.utils.dj_helper_fn import fetch_nwb +from spyglass.utils.dj_helper_fn import fetch_nwb, get_nwb_table from spyglass.utils.dj_merge_tables import RESERVED_PRIMARY_KEY as MERGE_PK from spyglass.utils.logging import logger +try: + import pynapple # noqa F401 +except ImportError: + pynapple = None + class SpyglassMixin: """Mixin for Spyglass DataJoint tables. @@ -122,6 +127,43 @@ def fetch_nwb(self, *attrs, **kwargs): """ return fetch_nwb(self, self._nwb_table_tuple, *attrs, **kwargs) + def fetch_pynapple(self, *attrs, **kwargs): + """Get a pynapple object from the given DataJoint query. + + Parameters + ---------- + *attrs : list + Attributes from normal DataJoint fetch call. + **kwargs : dict + Keyword arguments from normal DataJoint fetch call. + + Returns + ------- + pynapple_objects : list of pynapple objects + List of dicts containing pynapple objects. + + Raises + ------ + ImportError + If pynapple is not installed. 
+ + """ + if pynapple is None: + raise ImportError("Pynapple is not installed.") + + nwb_files, file_path_fn = get_nwb_table( + self, + self._nwb_table_tuple[0], + self._nwb_table_tuple[1], + *attrs, + **kwargs, + ) + + return [ + pynapple.load_file(file_path_fn(file_name)) + for file_name in nwb_files + ] + # ------------------------ delete_downstream_merge ------------------------ @cached_property From 9d5900a51a44da5693dcce33df68f736c394f5ef Mon Sep 17 00:00:00 2001 From: Eric Denovellis Date: Fri, 29 Mar 2024 10:59:59 -0700 Subject: [PATCH 12/60] Hot fixes for clusterless `get_ahead_behind_distance` and `get_spike_times` (#904) * Squeeze results * Make method and not class method * Update CHANGELOG.md --- CHANGELOG.md | 5 ++++- src/spyglass/decoding/v1/clusterless.py | 2 +- src/spyglass/spikesorting/spikesorting_merge.py | 5 ++--- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9af02f4e2..2d011da68 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,8 +11,11 @@ ### Pipelines --Spikesorting +- Spikesorting - Update calls in v0 pipeline for spikeinterface>=0.99 #893 + - Fix method type of `get_spike_times` #904 +- Decoding + - Handle dimensions of clusterless `get_ahead_behind_distance` #904 ## [0.5.1] (March 7, 2024) diff --git a/src/spyglass/decoding/v1/clusterless.py b/src/spyglass/decoding/v1/clusterless.py index d69daf57f..570f689ea 100644 --- a/src/spyglass/decoding/v1/clusterless.py +++ b/src/spyglass/decoding/v1/clusterless.py @@ -466,7 +466,7 @@ def get_ahead_behind_distance(self): # TODO: Handle decode intervals, store in table classifier = self.fetch_model() - results = self.fetch_results() + results = self.fetch_results().squeeze() posterior = results.acausal_posterior.unstack("state_bins").sum("state") if getattr(classifier.environments[0], "track_graph") is not None: diff --git a/src/spyglass/spikesorting/spikesorting_merge.py b/src/spyglass/spikesorting/spikesorting_merge.py index b59a48911..ee41e1e04 100644 --- a/src/spyglass/spikesorting/spikesorting_merge.py +++ b/src/spyglass/spikesorting/spikesorting_merge.py @@ -68,10 +68,9 @@ def get_sorting(cls, key): query = source_table & cls.merge_get_part(key) return query.get_sorting(query.fetch("KEY")) - @classmethod - def get_spike_times(cls, key): + def get_spike_times(self, key): spike_times = [] - for nwb_file in cls.fetch_nwb(key): + for nwb_file in self.fetch_nwb(key): # V1 uses 'object_id', V0 uses 'units' file_loc = "object_id" if "object_id" in nwb_file else "units" spike_times.extend(nwb_file[file_loc]["spike_times"].to_list()) From 5d579020761abef03eca51760be8c91d9f2ef1bc Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Wed, 3 Apr 2024 15:06:42 -0700 Subject: [PATCH 13/60] fix bugs in fetch_nwb (#913) --- src/spyglass/utils/dj_helper_fn.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/spyglass/utils/dj_helper_fn.py b/src/spyglass/utils/dj_helper_fn.py index c5fd82276..95eeb541f 100644 --- a/src/spyglass/utils/dj_helper_fn.py +++ b/src/spyglass/utils/dj_helper_fn.py @@ -170,7 +170,13 @@ def fetch_nwb(query_expression, nwb_master, *attrs, **kwargs): nwb_objects : list List of dicts containing fetch results and NWB objects. 
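With that helper in place, the new `fetch_pynapple` method gives table-level access to pynapple objects. A usage sketch, assuming pynapple is installed; the table and file name mirror the tutorial data and are illustrative:

```python
from spyglass.lfp.analysis.v1 import LFPBandV1

query = LFPBandV1 & {"nwb_file_name": "mediumnwb20230802_.nwb"}
# One pynapple object per analysis file referenced by the restricted rows.
pynapple_objects = query.fetch_pynapple()
```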
""" + kwargs["as_dict"] = True # force return as dictionary + tbl, attr_name = nwb_master + + if not attrs: + attrs = query_expression.heading.names + nwb_files, file_path_fn = get_nwb_table( query_expression, tbl, attr_name, *attrs, **kwargs ) From aa4ab8e0487019e60f53c20a9d5430813979d52d Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Wed, 10 Apr 2024 17:18:21 -0700 Subject: [PATCH 14/60] Check for entry in merge part table prior to insert (#922) * check for entry in merge part table prior to insert * update changelog --- CHANGELOG.md | 1 + src/spyglass/utils/dj_merge_tables.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d011da68..3001bd4f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - Fix errors in config import #882 - Save current spyglass version in analysis nwb files to aid diagnosis #897 - Add pynapple support #898 +- Fix potential duplicate entries in Merge part tables #922 ### Pipelines diff --git a/src/spyglass/utils/dj_merge_tables.py b/src/spyglass/utils/dj_merge_tables.py index 37dc62fe0..c5461e75c 100644 --- a/src/spyglass/utils/dj_merge_tables.py +++ b/src/spyglass/utils/dj_merge_tables.py @@ -332,6 +332,9 @@ def _merge_insert(cls, rows: list, part_name: str = None, **kwargs) -> None: + f"{part_name}:\n\tData:{row}\n\t{keys}" ) key = keys[0] + if part & key: + print(f"Key already in part {part_name}: {key}") + continue master_sk = {cls()._reserved_sk: part_name} uuid = dj.hash.key_hash(key | master_sk) master_pk = {cls()._reserved_pk: uuid} From d40424fad3eb013311b6a6e79a81d40a4ae7eaa0 Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Wed, 10 Apr 2024 17:22:38 -0700 Subject: [PATCH 15/60] Kachery fixes (#918) * Prioritize datajoint filepath for getting analysis file abs_path * remove deprecated kachery tables * update changelog * fix lint --------- Co-authored-by: Samuel Bray Co-authored-by: Eric Denovellis --- CHANGELOG.md | 1 + src/spyglass/common/common_nwbfile.py | 54 +++++++-------------------- 2 files changed, 15 insertions(+), 40 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3001bd4f6..b9b4898d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - Fix errors in config import #882 - Save current spyglass version in analysis nwb files to aid diagnosis #897 - Add pynapple support #898 +- Prioritize datajoint filepath entry for defining abs_path of analysis nwbfile #918 - Fix potential duplicate entries in Merge part tables #922 ### Pipelines diff --git a/src/spyglass/common/common_nwbfile.py b/src/spyglass/common/common_nwbfile.py index cab0ef66f..c8f14af57 100644 --- a/src/spyglass/common/common_nwbfile.py +++ b/src/spyglass/common/common_nwbfile.py @@ -309,8 +309,8 @@ def add(self, nwb_file_name, analysis_file_name): ) self.insert1(key) - @staticmethod - def get_abs_path(analysis_nwb_file_name): + @classmethod + def get_abs_path(cls, analysis_nwb_file_name): """Return the absolute path for a stored analysis NWB file given just the file name. The spyglass config from settings.py must be set. @@ -325,6 +325,18 @@ def get_abs_path(analysis_nwb_file_name): analysis_nwb_file_abspath : str The absolute path for the given file name. 
""" + # If an entry exists in the database get the stored datajoint filepath + file_key = {"analysis_file_name": analysis_nwb_file_name} + if cls & file_key: + try: + # runs if file exists locally + return (cls & file_key).fetch1("analysis_file_abs_path") + except FileNotFoundError as e: + # file exists in database but not locally + # parse the intended path from the error message + return str(e).split(": ")[1].replace("'", "") + + # File not in database, define what it should be # see if the file exists and is stored in the base analysis dir test_path = f"{analysis_dir}/{analysis_nwb_file_name}" @@ -656,41 +668,3 @@ def nightly_cleanup(): AnalysisNwbfile.cleanup(True) # also check to see whether there are directories in the spikesorting folder with this - - -@schema -class NwbfileKachery(SpyglassMixin, dj.Computed): - definition = """ - -> Nwbfile - --- - nwb_file_uri: varchar(200) # the uri the NWB file for kachery - """ - - def make(self, key): - import kachery_client as kc - - logger.info(f'Linking {key["nwb_file_name"]} and storing in kachery...') - key["nwb_file_uri"] = kc.link_file( - Nwbfile().get_abs_path(key["nwb_file_name"]) - ) - self.insert1(key) - - -@schema -class AnalysisNwbfileKachery(SpyglassMixin, dj.Computed): - definition = """ - -> AnalysisNwbfile - --- - analysis_file_uri: varchar(200) # the uri of the file - """ - - def make(self, key): - import kachery_client as kc - - logger.info( - f'Linking {key["analysis_file_name"]} and storing in kachery...' - ) - key["analysis_file_uri"] = kc.link_file( - AnalysisNwbfile().get_abs_path(key["analysis_file_name"]) - ) - self.insert1(key) From a9eb28afcb2fbb3278df39f9117d77e39c6d732f Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Thu, 11 Apr 2024 14:45:44 -0700 Subject: [PATCH 16/60] remove old tables from init (#925) --- src/spyglass/common/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/spyglass/common/__init__.py b/src/spyglass/common/__init__.py index 75a6c7c2e..a91ceb3f3 100644 --- a/src/spyglass/common/__init__.py +++ b/src/spyglass/common/__init__.py @@ -42,9 +42,7 @@ from spyglass.common.common_lab import Institution, Lab, LabMember, LabTeam from spyglass.common.common_nwbfile import ( AnalysisNwbfile, - AnalysisNwbfileKachery, Nwbfile, - NwbfileKachery, ) from spyglass.common.common_position import ( IntervalLinearizationSelection, From 4460d26de14f9d0ae8c586e105891fcef3cb4b68 Mon Sep 17 00:00:00 2001 From: Eric Denovellis Date: Fri, 12 Apr 2024 08:29:18 -0700 Subject: [PATCH 17/60] Fix improper uses of strip (#929) Strip will remove leading characters --- src/spyglass/common/common_interval.py | 2 +- src/spyglass/decoding/v1/clusterless.py | 2 +- src/spyglass/decoding/v1/sorted_spikes.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/spyglass/common/common_interval.py b/src/spyglass/common/common_interval.py index 24b143ad6..39c676f5a 100644 --- a/src/spyglass/common/common_interval.py +++ b/src/spyglass/common/common_interval.py @@ -139,7 +139,7 @@ def plot_epoch_pos_raw_intervals(self, figsize=(20, 5), return_fig=False): ax.text( interval[0] + np.diff(interval)[0] / 2, interval_y, - epoch.strip(" valid times"), + epoch.replace(" valid times", ""), ha="center", va="bottom", ) diff --git a/src/spyglass/decoding/v1/clusterless.py b/src/spyglass/decoding/v1/clusterless.py index 570f689ea..1c7fa830c 100644 --- a/src/spyglass/decoding/v1/clusterless.py +++ b/src/spyglass/decoding/v1/clusterless.py @@ -247,7 +247,7 @@ def make(self, key): # Insert results # in future use 
https://github.com/rly/ndx-xarray and analysis nwb file? - nwb_file_name = key["nwb_file_name"].strip("_.nwb") + nwb_file_name = key["nwb_file_name"].replace("_.nwb", "") # Generate a unique path for the results file path_exists = True diff --git a/src/spyglass/decoding/v1/sorted_spikes.py b/src/spyglass/decoding/v1/sorted_spikes.py index 43a71d91e..c36959a00 100644 --- a/src/spyglass/decoding/v1/sorted_spikes.py +++ b/src/spyglass/decoding/v1/sorted_spikes.py @@ -211,7 +211,7 @@ def make(self, key): # Insert results # in future use https://github.com/rly/ndx-xarray and analysis nwb file? - nwb_file_name = key["nwb_file_name"].strip("_.nwb") + nwb_file_name = key["nwb_file_name"].replace("_.nwb", "") # Generate a unique path for the results file path_exists = True From 201c67010ec941d58fa4d325955c3a6b3cac2c12 Mon Sep 17 00:00:00 2001 From: Eric Denovellis Date: Fri, 12 Apr 2024 08:30:32 -0700 Subject: [PATCH 18/60] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9b4898d1..9fd9e576c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ - Fix method type of `get_spike_times` #904 - Decoding - Handle dimensions of clusterless `get_ahead_behind_distance` #904 + - Fix improper handling of nwb file names with .strip #929 ## [0.5.1] (March 7, 2024) From 995f4cd23563911124f2bf82d1bd52c345e92b5e Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Fri, 19 Apr 2024 07:39:12 -0700 Subject: [PATCH 19/60] Misc Issues (#903) * #892 * #885 * #879 * Partial address of #860 * Update Changelog * Partial solve of #886 - Ask import * Fix failing tests * Add note on order of inheritace * #933 * Could not replicate fill_nan error. Reverting except clause --- .github/ISSUE_TEMPLATE/feature_request.md | 6 +- .github/pull_request_template.md | 30 ++++- CHANGELOG.md | 12 +- docs/src/contribute.md | 10 +- docs/src/misc/merge_tables.md | 17 --- docs/src/misc/mixin.md | 123 ++++++++++++++++++ .../position/v1/position_trodes_position.py | 14 +- src/spyglass/utils/dj_chains.py | 5 +- src/spyglass/utils/dj_merge_tables.py | 37 ++++-- src/spyglass/utils/dj_mixin.py | 63 +++++---- tests/utils/conftest.py | 6 + 11 files changed, 254 insertions(+), 69 deletions(-) create mode 100644 docs/src/misc/mixin.md diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index bbcbbe7d6..a60543cfe 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -8,13 +8,15 @@ assignees: '' --- **Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] +A clear and concise description of what the problem is. +For example, I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. +A clear and concise description of any alternative solutions or features you've +considered. **Additional context** Add any other context or screenshots about the feature request here. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 7a9d62306..87c66399c 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -5,15 +5,33 @@ include relevant motivation and context. 
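The `.strip` → `.replace` changes in #929 fix a subtle string bug: `str.strip` treats its argument as a set of characters to trim from both ends, not as a literal suffix. A quick illustration with a made-up file name:

```python
name = "wilbur20210331_.nwb"  # illustrative; any name starting with w/n/b/_/.
name.strip("_.nwb")        # -> 'ilbur20210331'  (the leading 'w' is lost too)
name.replace("_.nwb", "")  # -> 'wilbur20210331' (only the literal suffix goes)
```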
Please list issues fixed or closed by This PR. - Fixes #000: How this PR fixes the issue - - `path/file.py`: Description of the change - - `path/file.py`: Description of the change + - `path/file.py`: Description of the change + - `path/file.py`: Description of the change - Fixes #000: How this PR fixes the issue - - `path/file.py`: Description of the change - - `path/file.py`: Description of the change + - `path/file.py`: Description of the change + - `path/file.py`: Description of the change # Checklist: + + - [ ] This PR should be accompanied by a release: (yes/no/unsure) -- [ ] (If release) I have updated the `CITATION.cff` -- [ ] I have updated the `CHANGELOG.md` +- [ ] If release, I have updated the `CITATION.cff` +- [ ] This PR makes edits to table definitions: (yes/no) +- [ ] If table edits, I have included an `alter` snippet for release notes. +- [ ] I have updated the `CHANGELOG.md` with PR number and description. - [ ] I have added/edited docs/notebooks to reflect the changes diff --git a/CHANGELOG.md b/CHANGELOG.md index 9fd9e576c..faccf5a1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,12 +2,20 @@ ## [0.5.2] (Unreleased) +### Release Notes + + + ### Infrastructure - Refactor `TableChain` to include `_searched` attribute. #867 - Fix errors in config import #882 - Save current spyglass version in analysis nwb files to aid diagnosis #897 - Add pynapple support #898 +- Update PR template checklist to include db changes. #903 +- Avoid permission check on personnel tables. #903 +- Add documentation for `SpyglassMixin`. #903 +- Add helper to identify merge table by definition. #903 - Prioritize datajoint filepath entry for defining abs_path of analysis nwbfile #918 - Fix potential duplicate entries in Merge part tables #922 @@ -37,7 +45,7 @@ - Fixes to `_convert_mp4` #834 - Replace deprecated calls to `yaml.safe_load()` #834 - Spikesorting: - - Increase`spikeinterface` version to >=0.99.1, <0.100 #852 + - Increase`spikeinterface` version to >=0.99.1, \<0.100 #852 - Bug fix in single artifact interval edge case #859 - Bug fix in FigURL #871 - LFP @@ -214,3 +222,5 @@ [0.4.2]: https://github.com/LorenFrankLab/spyglass/releases/tag/0.4.2 [0.4.3]: https://github.com/LorenFrankLab/spyglass/releases/tag/0.4.3 [0.5.0]: https://github.com/LorenFrankLab/spyglass/releases/tag/0.5.0 +[0.5.1]: https://github.com/LorenFrankLab/spyglass/releases/tag/0.5.1 +[0.5.2]: https://github.com/LorenFrankLab/spyglass/releases/tag/0.5.2 diff --git a/docs/src/contribute.md b/docs/src/contribute.md index 4d41569c0..d34698a39 100644 --- a/docs/src/contribute.md +++ b/docs/src/contribute.md @@ -45,7 +45,7 @@ By convention, an individual pipeline has one or more the following table types: - Parameters table - Selection table - Data table -- Merge Table (see also [doc](./misc/merge_tables.md) +- Merge Table (see also [doc](./misc/merge_tables.md)) ### Common/Multi-pipeline @@ -226,16 +226,8 @@ faulty connection. - During development, we suggest using a Docker container. See [example](./notebooks/00_Setup.ipynb). -- DataJoint is unable to set delete permissions on a per-table basis. If a user - is able to delete entries in a given table, she can delete entries in any - table in the schema. The `SpikeSorting` table extends the built-in `delete` - method to check if the username matches a list of allowed users when - `delete` is called. Issues #226 and #586 track the progress of generalizing - this feature. - `numpy` style docstrings will be interpreted by API docs. 
To check for compliance, monitor the std out when building docs (see `docs/README.md`) -- `fetch_nwb` is currently reperated across many tables. For progress on a fix, - follow issue #530 ## Making a release diff --git a/docs/src/misc/merge_tables.md b/docs/src/misc/merge_tables.md index 1cd4b000b..bd67de4c2 100644 --- a/docs/src/misc/merge_tables.md +++ b/docs/src/misc/merge_tables.md @@ -10,23 +10,6 @@ and related discussions [here](https://github.com/datajoint/datajoint-python/issues/151) and [here](https://github.com/LorenFrankLab/spyglass/issues/469). -**Note:** Deleting entries upstream of Merge Tables will throw errors related to -deleting a part entry before the master. To circumvent this, you can add -`force_parts=True` to the -[`delete` function](https://datajoint.com/docs/core/datajoint-python/0.14/api/datajoint/__init__/#datajoint.table.Table.delete) -call, but this will leave and orphaned primary key in the master. Instead, use -`(YourTable & restriction).delete_downstream_merge()` to delete master/part -pairs. If errors persist, identify and import the offending part table and rerun -`delete_downstream_merge` with `reload_cache=True`. This process will be faster -for subsequent calls if you reassign the your table after importing. - -```python -from spyglass.common import Nwbfile - -nwbfile = Nwbfile() -(nwbfile & "nwb_file_name LIKE 'Name%'").delete_downstream_merge() -``` - ## What A Merge Table is fundamentally a master table with one part for each divergent diff --git a/docs/src/misc/mixin.md b/docs/src/misc/mixin.md new file mode 100644 index 000000000..747a12f9f --- /dev/null +++ b/docs/src/misc/mixin.md @@ -0,0 +1,123 @@ +# Spyglass Mixin + +The Spyglass Mixin provides a way to centralize all Spyglass-specific +functionalities that have been added to DataJoint tables. This includes... + +- Fetching NWB files +- Delete functionality, including permission checks and part/master pairs +- Export logging. See [export doc](export.md) for more information. + +To add this functionality to your own tables, simply inherit from the mixin: + +```python +import datajoint as dj +from spyglass.utils import SpyglassMixin + +schema = dj.schema('my_schema') + +@schema +class MyOldTable(dj.Manual): + pass + +@schema +class MyNewTable(SpyglassMixin, dj.Manual):) + pass +``` + +**NOTE**: The mixin must be the first class inherited from in order to override +default DataJoint functions. + +## Fetching NWB Files + +Many tables in Spyglass inheris from central tables that store records of NWB +files. Rather than adding a helper function to each table, the mixin provides a +single function to access these files from any table. + +```python +from spyglass.example import AnyTable + +(AnyTable & my_restriction).fetch_nwb() +``` + +This function will look at the table definition to determine if the raw file +should be fetched from `Nwbfile` or an analysis file should be fetched from +`AnalysisNwbfile`. If neither is foreign-key-referenced, the function will refer +to a `_nwb_table` attribute. + +## Delete Functionality + +The mixin overrides the default `delete` function to provide two additional +features. + +### Permission Checks + +By default, DataJoint is unable to set delete permissions on a per-table basis. +If a user is able to delete entries in a given table, she can delete entries in +any table in the schema. + +The mixin relies on the `Session.Experimenter` and `LabTeams` tables to ... + +1. Check the session and experimenter associated with the attempted deletion. +2. 
Check the lab teams associated with the session experimenter and the user. + +If the user shares a lab team with the session experimenter, the deletion is +permitted. + +This is not secure system and is not a replacement for database backups (see +[database management](./database_management.md)). A user could readily +curcumvent the default permission checks by adding themselves to the relevant +team or removing the mixin from the class declaration. However, it provides a +reasonable level of security for the average user. + +### Master/Part Pairs + +By default, DataJoint has protections in place to prevent deletion of a part +entry without deleting the corresponding master. This is useful for enforcing +the custom of adding/removing all parts of a master at once and avoids orphaned +masters, or null entry masters without matching data. + +For [Merge tables](./merge_tables.md), this is a significant problem. If a user +wants to delete all entries associated with a given session, she must find all +Merge entries and delete them in the correct order. The mixin provides a +function, `delete_downstream_merge`, to handle this, which is run by default +when calling `delete`. + +`delete_downstream_merge`, also aliased as `ddm`, identifies all Merge tables +downsteam of where it is called. If `dry_run=True`, it will return a list of +entries that would be deleted, otherwise it will delete them. + +Importantly, `delete_downstream_merge` cannot properly interact with tables that +have not been imported into the current namespace. If you are having trouble +with part deletion errors, import the offending table and rerun the function +with `reload_cache=True`. + +```python +from spyglass.common import Nwbfile + +restricted_nwbfile = Nwbfile() & "nwb_file_name LIKE 'Name%'" +restricted_nwbfile.delete_downstream_merge(dry_run=False) +# DataJointError("Attempt to delete part table MyMerge.Part before ... + +from spyglass.example import MyMerge + +restricted_nwbfile.delete_downstream_merge(reload_cache=True, dry_run=False) +``` + +Because each table keeps a cache of downsteam merge tables, it is important to +reload the cache if the table has been imported after the cache was created. +Speed gains can also be achieved by avoiding re-instancing the table each time. 
+ +```python +# Slow +from spyglass.common import Nwbfile + +(Nwbfile() & "nwb_file_name LIKE 'Name%'").ddm(dry_run=False) +(Nwbfile() & "nwb_file_name LIKE 'Other%'").ddm(dry_run=False) + +# Faster +from spyglass.common import Nwbfile + +nwbfile = Nwbfile() +(nwbfile & "nwb_file_name LIKE 'Name%'").ddm(dry_run=False) +(nwbfile & "nwb_file_name LIKE 'Other%'").ddm(dry_run=False) +``` diff --git a/src/spyglass/position/v1/position_trodes_position.py b/src/spyglass/position/v1/position_trodes_position.py index b2e2157d0..80f1cb700 100644 --- a/src/spyglass/position/v1/position_trodes_position.py +++ b/src/spyglass/position/v1/position_trodes_position.py @@ -288,8 +288,13 @@ def make(self, key): f"{current_dir.as_posix()}/{nwb_base_filename}_" f"{epoch:02d}_{key['trodes_pos_params_name']}.mp4" ) + red_cols = ( + ["xloc", "yloc"] + if "xloc" in raw_position_df.columns + else ["xloc1", "yloc1"] + ) centroids = { - "red": np.asarray(raw_position_df[["xloc", "yloc"]]), + "red": np.asarray(raw_position_df[red_cols]), "green": np.asarray(raw_position_df[["xloc2", "yloc2"]]), } position_mean = np.asarray( @@ -330,6 +335,7 @@ def convert_to_pixels(data, frame_size, cm_to_pixels=1.0): @staticmethod def fill_nan(variable, video_time, variable_time): + # TODO: Reduce duplication across dlc_utils and common_position video_ind = np.digitize(variable_time, video_time[1:]) n_video_time = len(video_time) @@ -338,6 +344,7 @@ def fill_nan(variable, video_time, variable_time): filled_variable = np.full((n_video_time, n_variable_dims), np.nan) except IndexError: filled_variable = np.full((n_video_time,), np.nan) + filled_variable[video_ind] = variable return filled_variable @@ -450,4 +457,7 @@ def make_video( video.release() out.release() - cv2.destroyAllWindows() + try: + cv2.destroyAllWindows() + except cv2.error: # if cv is already closed or does not have func + pass diff --git a/src/spyglass/utils/dj_chains.py b/src/spyglass/utils/dj_chains.py index 76ffeb107..fe9cebc02 100644 --- a/src/spyglass/utils/dj_chains.py +++ b/src/spyglass/utils/dj_chains.py @@ -282,7 +282,10 @@ def find_path(self, directed=True) -> OrderedDict: if table.isnumeric(): # get proj() attribute map for alias node if not prev_table: raise ValueError("Alias node found without prev table.") - attr_map = self.graph[table][prev_table]["attr_map"] + try: + attr_map = self.graph[table][prev_table]["attr_map"] + except KeyError: # Why is this only DLCCentroid?? + attr_map = self.graph[prev_table][table]["attr_map"] ret[prev_table]["attr_map"] = attr_map else: free_table = dj.FreeTable(self._connection, table) diff --git a/src/spyglass/utils/dj_merge_tables.py b/src/spyglass/utils/dj_merge_tables.py index c5461e75c..1e14468da 100644 --- a/src/spyglass/utils/dj_merge_tables.py +++ b/src/spyglass/utils/dj_merge_tables.py @@ -18,6 +18,30 @@ RESERVED_PRIMARY_KEY = "merge_id" RESERVED_SECONDARY_KEY = "source" RESERVED_SK_LENGTH = 32 +MERGE_DEFINITION = ( + f"\n {RESERVED_PRIMARY_KEY}: uuid\n ---\n" + + f" {RESERVED_SECONDARY_KEY}: varchar({RESERVED_SK_LENGTH})\n " +) + + +def is_merge_table(table): + """Return True if table definition matches the default Merge table. + + Regex removes comments and blank lines before comparison. 
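The `is_merge_table` helper introduced here gives a cheap way to recognize Merge tables from their definitions. A usage sketch; `PositionOutput` is a known Merge table and `LFPBandV1` an ordinary computed table, so the commented results are the expected ones, though they depend on each table's exact definition text:

```python
from spyglass.lfp.analysis.v1 import LFPBandV1
from spyglass.position.position_merge import PositionOutput
from spyglass.utils.dj_merge_tables import is_merge_table

is_merge_table(PositionOutput())  # expected True: default Merge definition
is_merge_table(LFPBandV1())       # expected False: regular table definition
```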
+ """ + if not isinstance(table, dj.Table): + return False + if isinstance(table, dj.FreeTable): + fields, pk = table.heading.names, table.primary_key + return fields == [ + RESERVED_PRIMARY_KEY, + RESERVED_SECONDARY_KEY, + ] and pk == [RESERVED_PRIMARY_KEY] + return MERGE_DEFINITION == re.sub( + r"\n\s*\n", + "\n", + re.sub(r"#.*\n", "\n", getattr(table, "definition", "")), + ) class Merge(dj.Manual): @@ -34,21 +58,16 @@ def __init__(self): super().__init__() self._reserved_pk = RESERVED_PRIMARY_KEY self._reserved_sk = RESERVED_SECONDARY_KEY - merge_def = ( - f"\n {self._reserved_pk}: uuid\n ---\n" - + f" {self._reserved_sk}: varchar({RESERVED_SK_LENGTH})\n " - ) if not self.is_declared: - # remove comments after # from each line of definition - if self._remove_comments(self.definition) != merge_def: + if not is_merge_table(self): # Check definition logger.warn( - "Merge table with non-default definition\n\t" - + f"Expected: {merge_def.strip()}\n\t" + "Merge table with non-default definition\n" + + f"Expected: {MERGE_DEFINITION.strip()}\n" + f"Actual : {self.definition.strip()}" ) for part in self.parts(as_objects=True): if part.primary_key != self.primary_key: - logger.warn( + logger.warn( # PK is only 'merge_id' in parts, no others f"Unexpected primary key in {part.table_name}" + f"\n\tExpected: {self.primary_key}" + f"\n\tActual : {part.primary_key}" diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index 082116bf6..9377b30c2 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -9,12 +9,14 @@ from datajoint.logging import logger as dj_logger from datajoint.table import Table from datajoint.utils import get_master, user_choice +from networkx import NetworkXError from pymysql.err import DataError from spyglass.utils.database_settings import SHARED_MODULES from spyglass.utils.dj_chains import TableChain, TableChains from spyglass.utils.dj_helper_fn import fetch_nwb, get_nwb_table from spyglass.utils.dj_merge_tables import RESERVED_PRIMARY_KEY as MERGE_PK +from spyglass.utils.dj_merge_tables import Merge, is_merge_table from spyglass.utils.logging import logger try: @@ -67,20 +69,22 @@ def __init__(self, *args, **kwargs): Checks that schema prefix is in SHARED_MODULES. 
""" - if ( - self.database # Connected to a database - and not self.is_declared # New table - and self.database.split("_")[0] # Prefix - not in [ - *SHARED_MODULES, # Shared modules - dj.config["database.user"], # User schema - "temp", - "test", - ] - ): + if self.is_declared: + return + if self.database and self.database.split("_")[0] not in [ + *SHARED_MODULES, + dj.config["database.user"], + "temp", + "test", + ]: logger.error( f"Schema prefix not in SHARED_MODULES: {self.database}" ) + if is_merge_table(self) and not isinstance(self, Merge): + raise TypeError( + "Table definition matches Merge but does not inherit class: " + + self.full_table_name + ) # ------------------------------- fetch_nwb ------------------------------- @@ -175,6 +179,7 @@ def _merge_tables(self) -> Dict[str, dj.FreeTable]: """ self.connection.dependencies.load() merge_tables = {} + visited = set() def search_descendants(parent): for desc in parent.descendants(as_objects=True): @@ -184,12 +189,18 @@ def search_descendants(parent): or master_name in merge_tables ): continue - master = dj.FreeTable(self.connection, master_name) - if MERGE_PK in master.heading.names: - merge_tables[master_name] = master - search_descendants(master) + master_ft = dj.FreeTable(self.connection, master_name) + if is_merge_table(master_ft): + merge_tables[master_name] = master_ft + if master_name not in visited: + visited.add(master_name) + search_descendants(master_ft) - _ = search_descendants(self) + try: + _ = search_descendants(self) + except NetworkXError as e: + table_name = "".join(e.args[0].split("`")[1:4]) + raise ValueError(f"Please import {table_name} and try again.") logger.info( f"Building merge cache for {self.table_name}.\n\t" @@ -231,6 +242,7 @@ def _get_chain(self, substring) -> TableChains: for name, chain in self._merge_chains.items(): if substring.lower() in name: return chain + raise ValueError(f"No chain found with '{substring}' in name.") def _commit_merge_deletes( self, merge_join_dict: Dict[str, List[QueryExpression]], **kwargs @@ -355,14 +367,19 @@ def _get_exp_summary(self): str Summary of experimenters for session(s). """ + Session = self._delete_deps[-1] SesExp = Session.Experimenter - empty_pk = {self._member_pk: "NULL"} + # Not called in delete permission check, only bare _get_exp_summary + if self._member_pk in self.heading.names: + return self * SesExp + + empty_pk = {self._member_pk: "NULL"} format = dj.U(self._session_pk, self._member_pk) - sess_link = self._session_connection.join( - self.restriction, reverse_order=True - ) + + restr = self.restriction or True + sess_link = self._session_connection.join(restr, reverse_order=True) exp_missing = format & (sess_link - SesExp).proj(**empty_pk) exp_present = format & (sess_link * SesExp - exp_missing).proj() @@ -404,7 +421,10 @@ def _check_delete_permission(self) -> None: if dj_user in LabMember().admin: # bypass permission check for admin return - if not self._session_connection: + if ( + not self._session_connection # Table has no session + or self._member_pk in self.heading.names # Table has experimenter + ): logger.warn( # Permit delete if no session connection "Could not find lab team associated with " + f"{self.__class__.__name__}." @@ -415,7 +435,6 @@ def _check_delete_permission(self) -> None: sess_summary = self._get_exp_summary() experimenters = sess_summary.fetch(self._member_pk) if None in experimenters: - # TODO: Check if allow delete of remainder? 
raise PermissionError( "Please ensure all Sessions have an experimenter in " + f"SessionExperimenter:\n{sess_summary}" diff --git a/tests/utils/conftest.py b/tests/utils/conftest.py index caa1c6bef..3503f9649 100644 --- a/tests/utils/conftest.py +++ b/tests/utils/conftest.py @@ -32,6 +32,12 @@ def schema_test(teardown, dj_conn): @pytest.fixture(scope="module") def chains(Nwbfile): """Return example TableChains object from Nwbfile.""" + from spyglass.lfp.lfp_merge import LFPOutput # noqa: F401 + from spyglass.linearization.merge import ( + LinearizedPositionOutput, + ) # noqa: F401 + from spyglass.position.position_merge import PositionOutput # noqa: F401 + yield Nwbfile._get_chain("linear") From 6f1e9009e5fa1cd8bb3951a479546767f164f3d6 Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Fri, 19 Apr 2024 07:42:33 -0700 Subject: [PATCH 20/60] Export logger (#875) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * WIP: rebase Export process * WIP: revise doc * ✅ : Generate working export script * Cleanup: Expand notebook, migrate export process from graph class to export * Revert dj_chains related edits * Update changelog * Revise doc * Address review comments #875 * Remove walrus in eval * prevent log on preview * Fix arg order on fetch, iterate over restr * Add upstream analysis files during cascade. Address false positive fetch * Avoid regen file list on revisit node * Bump Export.Table.restr to mediumblob * Revise Export.Table uniqueness to include export_id --- CHANGELOG.md | 1 + docs/mkdocs.yml | 2 + docs/src/misc/export.md | 131 +++++ notebooks/05_Export.ipynb | 788 ++++++++++++++++++++++++++ notebooks/py_scripts/05_Export.py | 201 +++++++ src/spyglass/common/common_lab.py | 2 +- src/spyglass/common/common_usage.py | 371 ++++++++++++ src/spyglass/settings.py | 9 + src/spyglass/utils/dj_graph.py | 381 +++++++++++++ src/spyglass/utils/dj_helper_fn.py | 9 +- src/spyglass/utils/dj_merge_tables.py | 11 +- src/spyglass/utils/dj_mixin.py | 188 +++++- 12 files changed, 2083 insertions(+), 11 deletions(-) create mode 100644 docs/src/misc/export.md create mode 100644 notebooks/05_Export.ipynb create mode 100644 notebooks/py_scripts/05_Export.py create mode 100644 src/spyglass/utils/dj_graph.py diff --git a/CHANGELOG.md b/CHANGELOG.md index faccf5a1c..05bf14719 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - Refactor `TableChain` to include `_searched` attribute. #867 - Fix errors in config import #882 - Save current spyglass version in analysis nwb files to aid diagnosis #897 +- Add functionality to export vertical slice of database. #875 - Add pynapple support #898 - Update PR template checklist to include db changes. #903 - Avoid permission check on personnel tables. 
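The conftest change above, and the new `NetworkXError` handling in `_merge_tables`, both point at the same requirement: downstream Merge tables must be imported before the chain/cache helpers can resolve them. A hedged sketch using the linearization merge table from the test fixture; the "linear" substring is illustrative:

```python
from spyglass.common import Nwbfile
from spyglass.linearization.merge import LinearizedPositionOutput  # noqa: F401

# With the merge table imported, the chain cache can match it by substring.
chains = Nwbfile()._get_chain("linear")
```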
#903 diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 996cb36dc..920b646a7 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -51,6 +51,7 @@ nav: - Data Sync: notebooks/02_Data_Sync.ipynb - Merge Tables: notebooks/03_Merge_Tables.ipynb - Config Populate: notebooks/04_PopulateConfigFile.ipynb + - Export: notebooks/05_Export.ipynb - Spikes: - Spike Sorting V0: notebooks/10_Spike_SortingV0.ipynb - Spike Sorting V1: notebooks/10_Spike_SortingV1.ipynb @@ -75,6 +76,7 @@ nav: - Insert Data: misc/insert_data.md - Merge Tables: misc/merge_tables.md - Database Management: misc/database_management.md + - Export: misc/export.md - API Reference: api/ # defer to gen-files + literate-nav - How to Contribute: contribute.md - Change Log: CHANGELOG.md diff --git a/docs/src/misc/export.md b/docs/src/misc/export.md new file mode 100644 index 000000000..ca3884dd7 --- /dev/null +++ b/docs/src/misc/export.md @@ -0,0 +1,131 @@ +# Export Process + +## Why + +DataJoint does not have any built-in functionality for exporting vertical slices +of a database. A lab can maintain a shared DataJoint pipeline across multiple +projects, but conforming to NIH data sharing guidelines may require that data +from only one project be shared during publication. + +## Requirements + +To export data with the current implementation, you must do the following: + +- All custom tables must inherit from `SpyglassMixin` (e.g., + `class MyTable(SpyglassMixin, dj.ManualOrOther):`) +- Only one export can be active at a time. +- Start the export process with `ExportSelection.start_export()`, run all + functions associated with a given analysis, and end the export process with + `ExportSelection.end_export()`. + +## How + +The current implementation relies on two classes in the Spyglass package +(`SpyglassMixin` and `RestrGraph`) and the `Export` tables. + +- `SpyglassMixin`: See `spyglass/utils/dj_mixin.py` +- `RestrGraph`: See `spyglass/utils/dj_graph.py` +- `Export`: See `spyglass/common/common_usage.py` + +### Mixin + +The `SpyglassMixin` class adds functionality to DataJoint tables. A subset of +methods are used to set an environment variable, `SPYGLASS_EXPORT_ID`, and, +while active, intercept all `fetch`/`fetch_nwb` calls to tables. When `fetch` is +called, the mixin grabs the table name and the restriction applied to the table +and stores them in the `ExportSelection` part tables. + +- `fetch_nwb` is specific to Spyglass and logs all analysis nwb files that are + fetched. +- `fetch` is a DataJoint method that retrieves data from a table. + +### Graph + +The `RestrGraph` class uses DataJoint's networkx graph to store each of the +tables and restrictions intercepted by the `SpyglassMixin`'s `fetch` as +'leaves'. The class then cascades these restrictions up from each leaf to all +ancestors. Use is modeled in the methods of `ExportSelection`. + +```python +from spyglass.utils.dj_graph import RestrGraph + +restr_graph = RestrGraph(seed_table=AnyTable, leaves=None, verbose=False) +restr_graph.add_leaves( + leaves=[ + { + "table_name": MyTable.full_table_name, + "restriction": "any_restriction", + }, + { + "table_name": AnotherTable.full_table_name, + "restriction": "another_restriction", + }, + ] +) +restr_graph.cascade() +restricted_leaves = restr_graph.leaf_ft +all_restricted_tables = restr_graph.all_ft +``` + +By default, a `RestrGraph` object is created with a seed table to have access to +a DataJoint connection and graph. One or more leaves can be added at +initialization or later with the `add_leaves` method. 
The cascade process is +delayed until `cascade`, or another method that requires the cascade, is called. + +Cascading a single leaf involves transforming the leaf's restriction into its +parent's restriction, then repeating the process until all ancestors are +reached. If two leaves share a common ancestor, the restrictions are combined. +This process also accommodates projected fields, which appear as numeric alias +nodes in the graph. + +### Export Table + +The `ExportSelection` is where users should interact with this process. + +```python +from spyglass.common.common_usage import ExportSelection +from spyglass.common.common_usage import Export + +export_key = {paper_id: "my_paper_id", analysis_id: "my_analysis_id"} +ExportSelection().start_export(**export_key) +analysis_data = (MyTable & my_restr).fetch() +analysis_nwb = (MyTable & my_restr).fetch_nwb() +ExportSelection().end_export() + +# Visual inspection +touched_files = ExportSelection.list_file_paths(**export_key) +restricted_leaves = ExportSelection.preview_tables(**export_key) + +# Export +Export().populate_paper(**export_key) +``` + +`Export`'s populate will invoke the `write_export` method to collect cascaded +restrictions and file paths in its part tables, and write out a bash script to +export the data using a series of `mysqldump` commands. The script is saved to +Spyglass's directory, `base_dir/export/paper_id/`, using credentials from +`dj_config`. To use alternative credentials, create a +[mysql config file](https://dev.mysql.com/doc/refman/8.0/en/option-files.html). + +To retain the ability to delete the logging from a particular analysis, the +`export_id` is a combination of the `paper_id` and `analysis_id` in +`ExportSelection`. When populated, the `Export` table, only the maximum +`export_id` for a given `paper_id` is used, resulting in one shell script per +paper. Each shell script one `mysqldump` command per table. + +## External Implementation + +To implement an export for a non-Spyglass database, you will need to ... + +- Create a modified version of `SpyglassMixin`, including ... + - `_export_table` method to lazy load an export table like `ExportSelection` + - `export_id` attribute, plus setter and deleter methods, to manage the status + of the export. + - `fetch` and other methods to intercept and log exported content. +- Create a modified version of `ExportSelection`, that adjusts fields like + `spyglass_version` to match the new database. + +Or, optionally, you can use the `RestrGraph` class to cascade hand-picked tables +and restrictions without the background logging of `SpyglassMixin`. The +assembled list of restricted free tables, `RestrGraph.all_ft`, can be passed to +`Export.write_export` to generate a shell script for exporting the data. 
diff --git a/notebooks/05_Export.ipynb b/notebooks/05_Export.ipynb new file mode 100644 index 000000000..c3b56aee5 --- /dev/null +++ b/notebooks/05_Export.ipynb @@ -0,0 +1,788 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "# Export\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Intro\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "_Developer Note:_ if you may make a PR in the future, be sure to copy this\n", + "notebook, and use the `gitignore` prefix `temp` to avoid future conflicts.\n", + "\n", + "This is one notebook in a multi-part series on Spyglass.\n", + "\n", + "- To set up your Spyglass environment and database, see\n", + " [the Setup notebook](./00_Setup.ipynb)\n", + "- To insert data, see [the Insert Data notebook](./01_Insert_Data.ipynb)\n", + "- For additional info on DataJoint syntax, including table definitions and\n", + " inserts, see\n", + " [these additional tutorials](https://github.com/datajoint/datajoint-tutorials)\n", + "- For information on what's goint on behind the scenes of an export, see\n", + " [documentation](https://lorenfranklab.github.io/spyglass/0.5/misc/export/)\n", + "\n", + "In short, Spyglass offers the ability to generate exports of one or more subsets\n", + "of the database required for a specific analysis as long as you do the following:\n", + "\n", + "- Inherit `SpyglassMixin` for all custom tables.\n", + "- Run only one export at a time.\n", + "- Start and stop each export logging process.\n", + "\n", + "**NOTE:** For demonstration purposes, this notebook relies on a more populated\n", + "database to highlight restriction merging capabilities of the export process.\n", + "Adjust the restrictions to suit your own dataset.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Imports\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's start by connecting to the database and importing some tables that might\n", + "be used in an analysis.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2024-03-28 16:32:49,766][INFO]: Connecting root@localhost:3309\n", + "[2024-03-28 16:32:49,773][INFO]: Connected root@localhost:3309\n", + "/home/cb/miniconda3/envs/spy/lib/python3.9/site-packages/torch/cuda/__init__.py:83: UserWarning: CUDA initialization: CUDA unknown error - this may be due to an incorrectly set up environment, e.g. changing env variable CUDA_VISIBLE_DEVICES after program start. Setting the available devices to be zero. 
(Triggered internally at ../c10/cuda/CUDAFunctions.cpp:109.)\n", + " return torch._C._cuda_getDeviceCount() > 0\n" + ] + } + ], + "source": [ + "import os\n", + "import datajoint as dj\n", + "\n", + "# change to the upper level folder to detect dj_local_conf.json\n", + "if os.path.basename(os.getcwd()) == \"notebooks\":\n", + " os.chdir(\"..\")\n", + "dj.config.load(\"dj_local_conf.json\") # load config for database connection info\n", + "\n", + "from spyglass.common.common_usage import Export, ExportSelection\n", + "from spyglass.lfp.analysis.v1 import LFPBandV1\n", + "from spyglass.position.v1 import TrodesPosV1\n", + "from spyglass.spikesorting.v1.curation import CurationV1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Export Tables\n", + "\n", + "The `ExportSelection` table will populate while we conduct the analysis. For\n", + "each file opened and each `fetch` call, an entry will be logged in one of its\n", + "part tables.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + " \n", + "
\n", + "

export_id

\n", + " \n", + "
\n", + "

paper_id

\n", + " \n", + "
\n", + "

analysis_id

\n", + " \n", + "
\n", + "

spyglass_version

\n", + " \n", + "
\n", + "

time

\n", + " \n", + "
\n", + " \n", + "

Total: 0

\n", + " " + ], + "text/plain": [ + "*export_id paper_id analysis_id spyglass_versi time \n", + "+-----------+ +----------+ +------------+ +------------+ +------+\n", + "\n", + " (Total: 0)" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ExportSelection()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + " \n", + "
\n", + "

export_id

\n", + " \n", + "
\n", + "

table_id

\n", + " \n", + "
\n", + "

table_name

\n", + " \n", + "
\n", + "

restriction

\n", + " \n", + "
\n", + " \n", + "

Total: 0

\n", + " " + ], + "text/plain": [ + "*export_id *table_id table_name restriction \n", + "+-----------+ +----------+ +------------+ +------------+\n", + "\n", + " (Total: 0)" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ExportSelection.Table()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + " \n", + "
\n", + "

export_id

\n", + " \n", + "
\n", + "

analysis_file_name

\n", + " name of the file\n", + "
\n", + " \n", + "

Total: 0

\n", + " " + ], + "text/plain": [ + "*export_id *analysis_file\n", + "+-----------+ +------------+\n", + "\n", + " (Total: 0)" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ExportSelection.File()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Exports are organized around paper and analysis IDs. A single export will be\n", + "generated for each paper, but we can delete/revise logs for each analysis before\n", + "running the export. When we're ready, we can run the `populate_paper` method\n", + "of the `Export` table. By default, export logs will ignore all tables in this\n", + "`common_usage` schema.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Logging\n", + "\n", + "There are a few restrictions to keep in mind when export logging:\n", + "\n", + "- You can only run _ONE_ export at a time.\n", + "- All tables must inherit `SpyglassMixin`\n", + "\n", + "
How to inherit SpyglassMixin\n", + "\n", + "DataJoint tables all inherit from one of the built-in table types.\n", + "\n", + "```python\n", + "class MyTable(dj.Manual):\n", + " ...\n", + "```\n", + "\n", + "To inherit the mixin, simply add it to the `()` of the class before the\n", + "DataJoint class. This can be done for existing tables without dropping them,\n", + "so long as the change has been made prior to export logging.\n", + "\n", + "```python\n", + "from spyglass.utils import SpyglassMixin\n", + "class MyTable(SpyglassMixin, dj.Manual):\n", + " ...\n", + "```\n", + "\n", + "
\n", + "\n", + "Let's start logging for 'paper1'.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[16:32:51][INFO] Spyglass: Starting {'export_id': 1}\n" + ] + } + ], + "source": [ + "paper_key = {\"paper_id\": \"paper1\"}\n", + "\n", + "ExportSelection().start_export(**paper_key, analysis_id=\"analysis1\")\n", + "my_lfp_data = (\n", + " LFPBandV1 # Logging this table\n", + " & \"nwb_file_name LIKE 'med%'\" # using a string restriction\n", + " & {\"filter_name\": \"Theta 5-11 Hz\"} # and a dictionary restriction\n", + ").fetch()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can check that it was logged. The syntax of the restriction will look\n", + "different from what we see in python, but the `preview_tables` will look\n", + "familiar.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "
\n", + "

export_id

\n", + " \n", + "
\n", + "

table_id

\n", + " \n", + "
\n", + "

table_name

\n", + " \n", + "
\n", + "

restriction

\n", + " \n", + "
11`lfp_band_v1`.`__l_f_p_band_v1` (( ((nwb_file_name LIKE 'med%%%%%%%%')))AND( ((`filter_name`=\"Theta 5-11 Hz\"))))
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*export_id *table_id table_name restriction \n", + "+-----------+ +----------+ +------------+ +------------+\n", + "1 1 `lfp_band_v1`. (( ((nwb_file\n", + " (Total: 1)" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ExportSelection.Table()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And log more under the same analysis ...\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "my_other_lfp_data = (\n", + " LFPBandV1\n", + " & {\n", + " \"nwb_file_name\": \"mediumnwb20230802_.nwb\",\n", + " \"filter_name\": \"Theta 5-10 Hz\",\n", + " }\n", + ").fetch()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Since these restrictions are mutually exclusive, we can check that the will\n", + "be combined appropriately by priviewing the logged tables...\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[FreeTable(`lfp_band_v1`.`__l_f_p_band_v1`)\n", + " *lfp_merge_id *filter_name *filter_sampli *nwb_file_name *target_interv *lfp_band_samp analysis_file_ interval_list_ lfp_band_objec\n", + " +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", + " 0f3bb01e-0ef6- Theta 5-10 Hz 1000 mediumnwb20230 pos 0 valid ti 100 mediumnwb20230 pos 0 valid ti 44e38dc1-3779-\n", + " 0f3bb01e-0ef6- Theta 5-11 Hz 1000 mediumnwb20230 pos 0 valid ti 100 mediumnwb20230 pos 0 valid ti c9b93111-decb-\n", + " (Total: 2)]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ExportSelection().preview_tables(**paper_key)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's try adding a new analysis with a fetched nwb file. Starting a new export\n", + "will stop the previous one.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[16:32:51][INFO] Spyglass: Export 1 in progress. 
Starting new.\n", + "[16:32:51][INFO] Spyglass: Starting {'export_id': 2}\n" + ] + } + ], + "source": [ + "ExportSelection().start_export(**paper_key, analysis_id=\"analysis2\")\n", + "curation_nwb = (CurationV1 & \"curation_id = 1\").fetch_nwb()\n", + "trodes_data = (TrodesPosV1 & 'trodes_pos_params_name = \"single_led\"').fetch()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can check that the right files were logged with the following...\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'file_path': '/home/cb/wrk/alt/data/raw/mediumnwb20230802_.nwb'},\n", + " {'file_path': '/home/cb/wrk/alt/data/analysis/mediumnwb20230802/mediumnwb20230802_ALNN6TZ4L7.nwb'}]" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ExportSelection().list_file_paths(paper_key)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And stop the export with ...\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "ExportSelection().stop_export()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Populate\n", + "\n", + "The `Export` table has a `populate_paper` method that will generate an export\n", + "bash script for the tables required by your analysis, including all the upstream\n", + "tables you didn't directly need, like `Subject` and `Session`.\n", + "\n", + "**NOTE:** Populating the export for a given paper will overwrite any previous\n", + "runs. For example, if you ran an export, and then added a third analysis for the\n", + "same paper, generating another export will delete any existing bash script and\n", + "`Export` table entries for the previous run.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[16:32:51][INFO] Spyglass: Export script written to /home/cb/wrk/alt/data/export/paper1/_ExportSQL_paper1.sh\n" + ] + } + ], + "source": [ + "Export().populate_paper(**paper_key)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "By default the export script will be located in an `export` folder within your\n", + "`SPYGLASS_BASE_DIR`. This default can be changed by adjusting your `dj.config`.\n", + "\n", + "Frank Lab members will need the help of a database admin (e.g., Chris) to\n", + "run the resulting bash script. 
The result will be a `.sql` file that anyone\n", + "can use to replicate the database entries you used in your analysis.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Up Next\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the [next notebook](./10_Spike_Sorting.ipynb), we'll start working with\n", + "ephys data with spike sorting.\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "spy", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/py_scripts/05_Export.py b/notebooks/py_scripts/05_Export.py new file mode 100644 index 000000000..b180832d4 --- /dev/null +++ b/notebooks/py_scripts/05_Export.py @@ -0,0 +1,201 @@ +# --- +# jupyter: +# jupytext: +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.16.0 +# kernelspec: +# display_name: spy +# language: python +# name: python3 +# --- + +# # Export +# + +# ## Intro +# + +# _Developer Note:_ if you may make a PR in the future, be sure to copy this +# notebook, and use the `gitignore` prefix `temp` to avoid future conflicts. +# +# This is one notebook in a multi-part series on Spyglass. +# +# - To set up your Spyglass environment and database, see +# [the Setup notebook](./00_Setup.ipynb) +# - To insert data, see [the Insert Data notebook](./01_Insert_Data.ipynb) +# - For additional info on DataJoint syntax, including table definitions and +# inserts, see +# [these additional tutorials](https://github.com/datajoint/datajoint-tutorials) +# - For information on what's goint on behind the scenes of an export, see +# [documentation](https://lorenfranklab.github.io/spyglass/0.5/misc/export/) +# +# In short, Spyglass offers the ability to generate exports of one or more subsets +# of the database required for a specific analysis as long as you do the following: +# +# - Inherit `SpyglassMixin` for all custom tables. +# - Run only one export at a time. +# - Start and stop each export logging process. +# +# **NOTE:** For demonstration purposes, this notebook relies on a more populated +# database to highlight restriction merging capabilities of the export process. +# Adjust the restrictions to suit your own dataset. +# + +# ## Imports +# + +# Let's start by connecting to the database and importing some tables that might +# be used in an analysis. +# + +# + +import os +import datajoint as dj + +# change to the upper level folder to detect dj_local_conf.json +if os.path.basename(os.getcwd()) == "notebooks": + os.chdir("..") +dj.config.load("dj_local_conf.json") # load config for database connection info + +from spyglass.common.common_usage import Export, ExportSelection +from spyglass.lfp.analysis.v1 import LFPBandV1 +from spyglass.position.v1 import TrodesPosV1 +from spyglass.spikesorting.v1.curation import CurationV1 + +# - + +# ## Export Tables +# +# The `ExportSelection` table will populate while we conduct the analysis. For +# each file opened and each `fetch` call, an entry will be logged in one of its +# part tables. +# + +ExportSelection() + +ExportSelection.Table() + +ExportSelection.File() + +# Exports are organized around paper and analysis IDs. 
A single export will be
+# generated for each paper, but we can delete/revise logs for each analysis before
+# running the export. When we're ready, we can run the `populate_paper` method
+# of the `Export` table. By default, export logs will ignore all tables in this
+# `common_usage` schema.
+#
+
+# ## Logging
+#
+# There are a few restrictions to keep in mind when logging an export:
+#
+# - You can only run _ONE_ export at a time.
+# - All tables must inherit `SpyglassMixin`.
+#
+# <details><summary>
How to inherit SpyglassMixin +# +# DataJoint tables all inherit from one of the built-in table types. +# +# ```python +# class MyTable(dj.Manual): +# ... +# ``` +# +# To inherit the mixin, simply add it to the `()` of the class before the +# DataJoint class. This can be done for existing tables without dropping them, +# so long as the change has been made prior to export logging. +# +# ```python +# from spyglass.utils import SpyglassMixin +# class MyTable(SpyglassMixin, dj.Manual): +# ... +# ``` +# +#
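+# If you are unsure whether an existing table already inherits the mixin, a
+# quick check (hypothetical snippet; substitute your own table class) is:
+#
+# ```python
+# from spyglass.utils import SpyglassMixin
+# issubclass(MyTable, SpyglassMixin)  # True means fetches on it will be logged
+# ```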
+# +# Let's start logging for 'paper1'. +# + +# + +paper_key = {"paper_id": "paper1"} + +ExportSelection().start_export(**paper_key, analysis_id="analysis1") +my_lfp_data = ( + LFPBandV1 # Logging this table + & "nwb_file_name LIKE 'med%'" # using a string restriction + & {"filter_name": "Theta 5-11 Hz"} # and a dictionary restriction +).fetch() +# - + +# We can check that it was logged. The syntax of the restriction will look +# different from what we see in python, but the `preview_tables` will look +# familiar. +# + +ExportSelection.Table() + +# And log more under the same analysis ... +# + +my_other_lfp_data = ( + LFPBandV1 + & { + "nwb_file_name": "mediumnwb20230802_.nwb", + "filter_name": "Theta 5-10 Hz", + } +).fetch() + +# Since these restrictions are mutually exclusive, we can check that the will +# be combined appropriately by priviewing the logged tables... +# + +ExportSelection().preview_tables(**paper_key) + +# Let's try adding a new analysis with a fetched nwb file. Starting a new export +# will stop the previous one. +# + +ExportSelection().start_export(**paper_key, analysis_id="analysis2") +curation_nwb = (CurationV1 & "curation_id = 1").fetch_nwb() +trodes_data = (TrodesPosV1 & 'trodes_pos_params_name = "single_led"').fetch() + +# We can check that the right files were logged with the following... +# + +ExportSelection().list_file_paths(paper_key) + +# And stop the export with ... +# + +ExportSelection().stop_export() + +# ## Populate +# +# The `Export` table has a `populate_paper` method that will generate an export +# bash script for the tables required by your analysis, including all the upstream +# tables you didn't directly need, like `Subject` and `Session`. +# +# **NOTE:** Populating the export for a given paper will overwrite any previous +# runs. For example, if you ran an export, and then added a third analysis for the +# same paper, generating another export will delete any existing bash script and +# `Export` table entries for the previous run. +# + +Export().populate_paper(**paper_key) + +# By default the export script will be located in an `export` folder within your +# `SPYGLASS_BASE_DIR`. This default can be changed by adjusting your `dj.config`. +# +# Frank Lab members will need the help of a database admin (e.g., Chris) to +# run the resulting bash script. The result will be a `.sql` file that anyone +# can use to replicate the database entries you used in your analysis. +# + +# ## Up Next +# + +# In the [next notebook](./10_Spike_Sorting.ipynb), we'll start working with +# ephys data with spike sorting. +# diff --git a/src/spyglass/common/common_lab.py b/src/spyglass/common/common_lab.py index a6a162b2b..c5a6fbc00 100644 --- a/src/spyglass/common/common_lab.py +++ b/src/spyglass/common/common_lab.py @@ -92,7 +92,7 @@ def _load_admin(cls): """Load admin list.""" cls._admin = list( (cls.LabMemberInfo & {"admin": True}).fetch("datajoint_user_name") - ) + ) + ["root"] @property def admin(cls) -> list: diff --git a/src/spyglass/common/common_usage.py b/src/spyglass/common/common_usage.py index fdf7ae99d..dae4f7842 100644 --- a/src/spyglass/common/common_usage.py +++ b/src/spyglass/common/common_usage.py @@ -6,7 +6,18 @@ plan future development of Spyglass. 
""" +from pathlib import Path +from typing import List, Union + import datajoint as dj +from datajoint import FreeTable +from datajoint import config as dj_config + +from spyglass.common.common_nwbfile import AnalysisNwbfile, Nwbfile +from spyglass.settings import export_dir +from spyglass.utils import SpyglassMixin, logger +from spyglass.utils.dj_graph import RestrGraph +from spyglass.utils.dj_helper_fn import unique_dicts schema = dj.schema("common_usage") @@ -37,3 +48,363 @@ class InsertError(dj.Manual): error_message: varchar(255) error_raw = null: blob """ + + +@schema +class ExportSelection(SpyglassMixin, dj.Manual): + definition = """ + export_id: int auto_increment + --- + paper_id: varchar(32) + analysis_id: varchar(32) + spyglass_version: varchar(16) + time=CURRENT_TIMESTAMP: timestamp + unique index (paper_id, analysis_id) + """ + + class Table(SpyglassMixin, dj.Part): + definition = """ + -> master + table_id: int + --- + table_name: varchar(128) + restriction: varchar(2048) + """ + + def insert1(self, key, **kwargs): + key = self._auto_increment(key, pk="table_id") + super().insert1(key, **kwargs) + + def insert(self, keys: List[dict], **kwargs): + if not isinstance(keys[0], dict): + raise TypeError("Pass Table Keys as list of dict") + keys = [self._auto_increment(k, pk="table_id") for k in keys] + super().insert(keys, **kwargs) + + class File(SpyglassMixin, dj.Part): + definition = """ + -> master + -> AnalysisNwbfile + """ + # Note: only tracks AnalysisNwbfile. list_file_paths also grabs Nwbfile. + + def insert1_return_pk(self, key: dict, **kwargs) -> int: + """Custom insert to return export_id.""" + status = "Resuming" + if not (query := self & key): + key = self._auto_increment(key, pk="export_id") + super().insert1(key, **kwargs) + status = "Starting" + export_id = query.fetch1("export_id") + export_key = {"export_id": export_id} + if query := (Export & export_key): + query.super_delete(warn=False) + logger.info(f"{status} {export_key}") + return export_id + + def start_export(self, paper_id, analysis_id) -> None: + """Start logging a new export.""" + self._start_export(paper_id, analysis_id) + + def stop_export(self, **kwargs) -> None: + """Stop logging the current export.""" + self._stop_export() + + # NOTE: These helpers could be moved to table below, but I think + # end users may want to use them to check what's in the export log + # before actually exporting anything, which is more associated with + # Selection + + def list_file_paths(self, key: dict) -> list[str]: + """Return a list of unique file paths for a given restriction/key. + + Note: This list reflects files fetched during the export process. For + upstream files, use RestrGraph.file_paths. + """ + file_table = self * self.File & key + analysis_fp = [ + AnalysisNwbfile().get_abs_path(fname) + for fname in file_table.fetch("analysis_file_name") + ] + nwbfile_fp = [ + Nwbfile().get_abs_path(fname) + for fname in (AnalysisNwbfile * file_table).fetch("nwb_file_name") + ] + return [{"file_path": p} for p in list({*analysis_fp, *nwbfile_fp})] + + def get_restr_graph(self, key: dict, verbose=False) -> RestrGraph: + """Return a RestrGraph for a restriction/key's tables/restrictions. + + Ignores duplicate entries. + + Parameters + ---------- + key : dict + Any valid restriction key for ExportSelection.Table + verbose : bool, optional + Turn on RestrGraph verbosity. Default False. 
+ """ + leaves = unique_dicts( + (self * self.Table & key).fetch( + "table_name", "restriction", as_dict=True + ) + ) + return RestrGraph(seed_table=self, leaves=leaves, verbose=verbose) + + def preview_tables(self, **kwargs) -> list[dj.FreeTable]: + """Return a list of restricted FreeTables for a given restriction/key. + + Useful for checking what will be exported. + """ + return self.get_restr_graph(kwargs).leaf_ft + + def _max_export_id(self, paper_id: str, return_all=False) -> int: + """Return last export associated with a given paper id. + + Used to populate Export table.""" + if isinstance(paper_id, dict): + paper_id = paper_id.get("paper_id") + if not (query := self & {"paper_id": paper_id}): + return None + all_export_ids = query.fetch("export_id") + return all_export_ids if return_all else max(all_export_ids) + + def paper_export_id(self, paper_id: str) -> dict: + """Return the maximum export_id for a paper, used to populate Export.""" + return {"export_id": self._max_export_id(paper_id)} + + +@schema +class Export(SpyglassMixin, dj.Computed): + definition = """ + -> ExportSelection + --- + paper_id: varchar(32) + """ + + # In order to get a many-to-one relationship btwn Selection and Export, + # we ignore all but the last export_id. If more exports are added above, + # generating a new output will overwrite the old ones. + + class Table(SpyglassMixin, dj.Part): + definition = """ + -> master + table_id: int + --- + table_name: varchar(128) + restriction: mediumblob + unique index (export_id, table_name) + """ + + class File(SpyglassMixin, dj.Part): + definition = """ + -> master + file_id: int + --- + file_path: varchar(255) + """ + + def populate_paper(self, paper_id: Union[str, dict]): + if isinstance(paper_id, dict): + paper_id = paper_id.get("paper_id") + self.populate(ExportSelection().paper_export_id(paper_id)) + + def make(self, key): + query = ExportSelection & key + paper_key = query.fetch("paper_id", as_dict=True)[0] + + # Null insertion if export_id is not the maximum for the paper + all_export_ids = query._max_export_id(paper_key, return_all=True) + max_export_id = max(all_export_ids) + if key.get("export_id") != max_export_id: + logger.info( + f"Skipping export_id {key['export_id']}, use {max_export_id}" + ) + self.insert1(key) + return + + # If lesser ids are present, delete parts yielding null entries + processed_ids = set( + list(self.Table.fetch("export_id")) + + list(self.File.fetch("export_id")) + ) + if overlap := set(all_export_ids) - {max_export_id} & processed_ids: + logger.info(f"Overwriting export_ids {overlap}") + for export_id in overlap: + id_dict = {"export_id": export_id} + (self.Table & id_dict).delete_quick() + (self.Table & id_dict).delete_quick() + + restr_graph = query.get_restr_graph(paper_key) + file_paths = unique_dicts( # Original plus upstream files + query.list_file_paths(paper_key) + restr_graph.file_paths + ) + + table_inserts = [ + {**key, **rd, "table_id": i} + for i, rd in enumerate(restr_graph.as_dict) + ] + file_inserts = [ + {**key, **fp, "file_id": i} for i, fp in enumerate(file_paths) + ] + + # Writes but does not run mysqldump. Assumes single version per paper. 
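+        # write_export also writes a ~/.my.cnf file so the resulting dump
+        # script can run mysqldump without prompting for a password.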
+ version_key = query.fetch("spyglass_version", as_dict=True)[0] + self.write_export( + free_tables=restr_graph.all_ft, **paper_key, **version_key + ) + + self.insert1({**key, **paper_key}) + self.Table().insert(table_inserts) + self.File().insert(file_inserts) + + def _get_credentials(self): + """Get credentials for database connection.""" + return { + "user": dj_config["database.user"], + "password": dj_config["database.password"], + "host": dj_config["database.host"], + } + + def _write_sql_cnf(self): + """Write SQL cnf file to avoid password prompt.""" + cnf_path = Path("~/.my.cnf").expanduser() + + if cnf_path.exists(): + return + + template = "[client]\nuser={user}\npassword={password}\nhost={host}\n" + + with open(str(cnf_path), "w") as file: + file.write(template.format(**self._get_credentials())) + cnf_path.chmod(0o600) + + def _bash_escape(self, s): + """Escape restriction string for bash.""" + s = s.strip() + + replace_map = { + "WHERE ": "", # Remove preceding WHERE of dj.where_clause + " ": " ", # Squash double spaces + "( (": "((", # Squash double parens + ") )": ")", + '"': "'", # Replace double quotes with single + "`": "", # Remove backticks + " AND ": " \\\n\tAND ", # Add newline and tab for readability + " OR ": " \\\n\tOR ", # OR extra space to align with AND + ")AND(": ") \\\n\tAND (", + ")OR(": ") \\\n\tOR (", + "#": "\\#", + } + for old, new in replace_map.items(): + s = s.replace(old, new) + if s.startswith("(((") and s.endswith(")))"): + s = s[2:-2] # Remove extra parens for readability + return s + + def _cmd_prefix(self, docker_id=None): + """Get prefix for mysqldump command. Includes docker exec if needed.""" + if not docker_id: + return "mysqldump " + return ( + f"docker exec -i {docker_id} \\\n\tmysqldump " + + "-u {user} --password={password} \\\n\t".format( + **self._get_credentials() + ) + ) + + def _write_mysqldump( + self, + free_tables: List[FreeTable], + paper_id: str, + docker_id=None, + spyglass_version=None, + ): + """Write mysqlmdump.sh script to export data. + + Parameters + ---------- + paper_id : str + Paper ID to use for export file names + docker_id : str, optional + Docker container ID to export from. Default None + spyglass_version : str, optional + Spyglass version to include in export. 
Default None + """ + paper_dir = Path(export_dir) / paper_id if not docker_id else Path(".") + paper_dir.mkdir(exist_ok=True) + + dump_script = paper_dir / f"_ExportSQL_{paper_id}.sh" + dump_content = paper_dir / f"_Populate_{paper_id}.sql" + + prefix = self._cmd_prefix(docker_id) + version = ( # Include spyglass version as comment in dump + "echo '--'\n" + + f"echo '-- SPYGLASS VERSION: {spyglass_version} --'\n" + + "echo '--'\n\n" + if spyglass_version + else "" + ) + create_cmd = ( + "echo 'CREATE DATABASE IF NOT EXISTS {database}; " + + "USE {database};'\n\n" + ) + dump_cmd = prefix + '{database} {table} --where="\\\n\t{where}"\n\n' + + tables_by_db = sorted(free_tables, key=lambda x: x.full_table_name) + + with open(dump_script, "w") as file: + file.write( + "#!/bin/bash\n\n" + + f"exec > {dump_content}\n\n" # Redirect output to sql file + + f"{version}" # Include spyglass version as comment + ) + + prev_db = None + for table in tables_by_db: + if not (where := table.where_clause()): + continue + where = self._bash_escape(where) + database, table_name = ( + table.full_table_name.replace("`", "") + .replace("#", "\\#") + .split(".") + ) + if database != prev_db: + file.write(create_cmd.format(database=database)) + prev_db = database + file.write( + dump_cmd.format( + database=database, table=table_name, where=where + ) + ) + logger.info(f"Export script written to {dump_script}") + + def write_export( + self, + free_tables: List[FreeTable], + paper_id: str, + docker_id=None, + spyglass_version=None, + ): + """Write export bash script for all tables in graph. + + Also writes a user-specific .my.cnf file to avoid password prompt. + + Parameters + ---------- + free_tables : List[FreeTable] + List of restricted FreeTables to export + paper_id : str + Paper ID to use for export file names + docker_id : str, optional + Docker container ID to export from. Default None + spyglass_version : str, optional + Spyglass version to include in export. Default None + """ + self._write_sql_cnf() + self._write_mysqldump( + free_tables, paper_id, docker_id, spyglass_version + ) + + # TODO: export conda env diff --git a/src/spyglass/settings.py b/src/spyglass/settings.py index af16e688d..be2912c9d 100644 --- a/src/spyglass/settings.py +++ b/src/spyglass/settings.py @@ -60,6 +60,7 @@ def __init__(self, base_dir: str = None, **kwargs): self.relative_dirs = { # {PREFIX}_{KEY}_DIR, default dir relative to base_dir + # NOTE: Adding new dir requires edit to HHMI hub "spyglass": { "raw": "raw", "analysis": "analysis", @@ -68,6 +69,7 @@ def __init__(self, base_dir: str = None, **kwargs): "waveforms": "waveforms", "temp": "tmp", "video": "video", + "export": "export", }, "kachery": { "cloud": ".kachery-cloud", @@ -459,6 +461,7 @@ def _dj_custom(self) -> dict: "waveforms": self.waveforms_dir, "temp": self.temp_dir, "video": self.video_dir, + "export": self.export_dir, }, "kachery_dirs": { "cloud": self.config.get( @@ -516,6 +519,10 @@ def temp_dir(self) -> str: def video_dir(self) -> str: return self.config.get(self.dir_to_var("video")) + @property + def export_dir(self) -> str: + return self.config.get(self.dir_to_var("export")) + @property def debug_mode(self) -> bool: """Returns True if debug_mode is set. 
@@ -560,6 +567,7 @@ def dlc_output_dir(self) -> str: sorting_dir = None waveforms_dir = None video_dir = None + export_dir = None dlc_project_dir = None dlc_video_dir = None dlc_output_dir = None @@ -573,6 +581,7 @@ def dlc_output_dir(self) -> str: sorting_dir = sg_config.sorting_dir waveforms_dir = sg_config.waveforms_dir video_dir = sg_config.video_dir + export_dir = sg_config.export_dir debug_mode = sg_config.debug_mode test_mode = sg_config.test_mode prepopulate = config.get("prepopulate", False) diff --git a/src/spyglass/utils/dj_graph.py b/src/spyglass/utils/dj_graph.py new file mode 100644 index 000000000..59e7497d5 --- /dev/null +++ b/src/spyglass/utils/dj_graph.py @@ -0,0 +1,381 @@ +"""DataJoint graph traversal and restriction application. + +NOTE: read `ft` as FreeTable and `restr` as restriction. +""" + +from typing import Dict, List, Union + +from datajoint import FreeTable +from datajoint.condition import make_condition +from datajoint.table import Table +from tqdm import tqdm + +from spyglass.common import AnalysisNwbfile +from spyglass.utils import logger +from spyglass.utils.dj_helper_fn import unique_dicts + + +class RestrGraph: + def __init__( + self, + seed_table: Table, + table_name: str = None, + restriction: str = None, + leaves: List[Dict[str, str]] = None, + verbose: bool = False, + **kwargs, + ): + """Use graph to cascade restrictions up from leaves to all ancestors. + + Parameters + ---------- + seed_table : Table + Table to use to establish connection and graph + table_name : str, optional + Table name of single leaf, default None + restriction : str, optional + Restriction to apply to leaf. default None + leaves : Dict[str, str], optional + List of dictionaries with keys table_name and restriction. One + entry per leaf node. Default None. + verbose : bool, optional + Whether to print verbose output. Default False + """ + + self.connection = seed_table.connection + self.graph = seed_table.connection.dependencies + self.graph.load() + + self.verbose = verbose + self.cascaded = False + self.ancestors = set() + self.visited = set() + self.leaves = set() + self.analysis_pk = AnalysisNwbfile().primary_key + + if table_name and restriction: + self.add_leaf(table_name, restriction) + if leaves: + self.add_leaves(leaves, show_progress=verbose) + + def __repr__(self): + l_str = ",\n\t".join(self.leaves) + "\n" if self.leaves else "" + processed = "Cascaded" if self.cascaded else "Uncascaded" + return f"{processed} RestrictionGraph(\n\t{l_str})" + + @property + def all_ft(self): + """Get restricted FreeTables from all visited nodes.""" + self.cascade() + return [self._get_ft(table, with_restr=True) for table in self.visited] + + @property + def leaf_ft(self): + """Get restricted FreeTables from graph leaves.""" + return [self._get_ft(table, with_restr=True) for table in self.leaves] + + def _get_node(self, table): + """Get node from graph.""" + if not (node := self.graph.nodes.get(table)): + raise ValueError( + f"Table {table} not found in graph." + + "\n\tPlease import this table and rerun" + ) + return node + + def _set_node(self, table, attr="ft", value=None): + """Set attribute on node. General helper for various attributes.""" + _ = self._get_node(table) # Ensure node exists + self.graph.nodes[table][attr] = value + + def _get_ft(self, table, with_restr=False): + """Get FreeTable from graph node. 
If one doesn't exist, create it.""" + table = table if isinstance(table, str) else table.full_table_name + restr = self._get_restr(table) if with_restr else True + if ft := self._get_node(table).get("ft"): + return ft & restr + ft = FreeTable(self.connection, table) + self._set_node(table, "ft", ft) + return ft & restr + + def _get_restr(self, table): + """Get restriction from graph node.""" + table = table if isinstance(table, str) else table.full_table_name + return self._get_node(table).get("restr", "False") + + def _get_files(self, table): + """Get analysis files from graph node.""" + return self._get_node(table).get("files", []) + + def _set_restr(self, table, restriction): + """Add restriction to graph node. If one exists, merge with new.""" + ft = self._get_ft(table) + restriction = ( # Convert to condition if list or dict + make_condition(ft, restriction, set()) + if not isinstance(restriction, str) + else restriction + ) + # orig_restr = restriction + if existing := self._get_restr(table): + if existing == restriction: + return + join = ft & [existing, restriction] + if len(join) == len(ft & existing): + return # restriction is a subset of existing + restriction = make_condition( + ft, unique_dicts(join.fetch("KEY", as_dict=True)), set() + ) + + # if table == "`spikesorting_merge`.`spike_sorting_output`": + # __import__("pdb").set_trace() + + self._set_node(table, "restr", restriction) + + def get_restr_ft(self, table: Union[int, str]): + """Get restricted FreeTable from graph node. + + Currently used. May be useful for debugging. + + Parameters + ---------- + table : Union[int, str] + Table name or index in visited set + """ + if isinstance(table, int): + table = list(self.visited)[table] + return self._get_ft(table, with_restr=True) + + def _log_truncate(self, log_str, max_len=80): + """Truncate log lines to max_len and print if verbose.""" + if not self.verbose: + return + logger.info( + log_str[:max_len] + "..." if len(log_str) > max_len else log_str + ) + + def _child_to_parent( + self, + child, + parent, + restriction, + attr_map=None, + primary=True, + **kwargs, + ) -> List[Dict[str, str]]: + """Given a child, child's restr, and parent, get parent's restr. + + Parameters + ---------- + child : str + child table name + parent : str + parent table name + restriction : str + restriction to apply to child + attr_map : dict, optional + dictionary mapping aliases across parend/child, as pulled from + DataJoint-assembled graph. Default None. Func will flip this dict + to convert from child to parent fields. + primary : bool, optional + Is parent in child's primary key? Default True. Also derived from + DataJoint-assembled graph. If True, project only primary key fields + to avoid secondary key collisions. + + Returns + ------- + List[Dict[str, str]] + List of dicts containing primary key fields for restricted parent + table. 
+ """ + + # Need to flip attr_map to respect parent's fields + attr_reverse = ( + {v: k for k, v in attr_map.items() if k != v} if attr_map else {} + ) + child_ft = self._get_ft(child) + parent_ft = self._get_ft(parent).proj() + restr = restriction or self._get_restr(child_ft) or True + restr_child = child_ft & restr + + if primary: # Project only primary key fields to avoid collisions + join = restr_child.proj(**attr_reverse) * parent_ft + else: # Include all fields + join = restr_child.proj(..., **attr_reverse) * parent_ft + + ret = unique_dicts(join.fetch(*parent_ft.primary_key, as_dict=True)) + + if len(ret) == len(parent_ft): + self._log_truncate(f"NULL restr {parent}") + + return ret + + def cascade_files(self): + """Set node attribute for analysis files.""" + for table in self.visited: + ft = self._get_ft(table) + if not set(self.analysis_pk).issubset(ft.heading.names): + continue + files = (ft & self._get_restr(table)).fetch(*self.analysis_pk) + self._set_node(table, "files", files) + + def cascade1(self, table, restriction): + """Cascade a restriction up the graph, recursively on parents. + + Parameters + ---------- + table : str + table name + restriction : str + restriction to apply + """ + self._set_restr(table, restriction) + self.visited.add(table) + + for parent, data in self.graph.parents(table).items(): + if parent in self.visited: + continue + + if parent.isnumeric(): + parent, data = self.graph.parents(parent).popitem() + + parent_restr = self._child_to_parent( + child=table, + parent=parent, + restriction=restriction, + **data, + ) + + self.cascade1(parent, parent_restr) # Parent set on recursion + + def cascade(self, show_progress=None) -> None: + """Cascade all restrictions up the graph. + + Parameters + ---------- + show_progress : bool, optional + Show tqdm progress bar. Default to verbose setting. + """ + if self.cascaded: + return + to_visit = self.leaves - self.visited + for table in tqdm( + to_visit, + desc="RestrGraph: cascading restrictions", + total=len(to_visit), + disable=not (show_progress or self.verbose), + ): + restr = self._get_restr(table) + self._log_truncate(f"Start {table}: {restr}") + self.cascade1(table, restr) + if not self.visited == self.ancestors: + raise RuntimeError( + "Cascade: FAIL - incomplete cascade. Please post issue." + ) + + self.cascade_files() + self.cascaded = True + + def add_leaf(self, table_name, restriction, cascade=False) -> None: + """Add leaf to graph and cascade if requested. + + Parameters + ---------- + table_name : str + table name of leaf + restriction : str + restriction to apply to leaf + """ + new_ancestors = set(self._get_ft(table_name).ancestors()) + self.ancestors |= new_ancestors # Add to total ancestors + self.visited -= new_ancestors # Remove from visited to revisit + + self.leaves.add(table_name) + self._set_restr(table_name, restriction) # Redundant if cascaded + + if cascade: + self.cascade1(table_name, restriction) + self.cascade_files() + self.cascaded = True + + def add_leaves( + self, leaves: List[Dict[str, str]], cascade=False, show_progress=None + ) -> None: + """Add leaves to graph and cascade if requested. + + Parameters + ---------- + leaves : List[Dict[str, str]] + list of dictionaries containing table_name and restriction + cascade : bool, optional + Whether to cascade the restrictions up the graph. Default False + show_progress : bool, optional + Show tqdm progress bar. Default to verbose setting. 
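+
+        Example
+        -------
+        >>> # hypothetical usage; assumes `restr_graph` is an existing
+        >>> # RestrGraph and the named table has already been imported
+        >>> restr_graph.add_leaves(
+        ...     leaves=[{
+        ...         "table_name": "`common_session`.`session`",
+        ...         "restriction": "nwb_file_name = 'example_.nwb'",
+        ...     }],
+        ...     cascade=True,
+        ... )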
+ """ + + if not leaves: + return + if not isinstance(leaves, list): + leaves = [leaves] + leaves = unique_dicts(leaves) + for leaf in tqdm( + leaves, + desc="RestrGraph: adding leaves", + total=len(leaves), + disable=not (show_progress or self.verbose), + ): + if not ( + (table_name := leaf.get("table_name")) + and (restriction := leaf.get("restriction")) + ): + raise ValueError( + f"Leaf must have table_name and restriction: {leaf}" + ) + self.add_leaf(table_name, restriction, cascade=False) + if cascade: + self.cascade() + self.cascade_files() + + @property + def as_dict(self) -> List[Dict[str, str]]: + """Return as a list of dictionaries of table_name: restriction""" + self.cascade() + return [ + {"table_name": table, "restriction": self._get_restr(table)} + for table in self.ancestors + if self._get_restr(table) + ] + + @property + def file_dict(self) -> Dict[str, List[str]]: + """Return dictionary of analysis files from all visited nodes. + + Currently unused, but could be useful for debugging. + """ + if not self.cascaded: + logger.warning("Uncascaded graph. Using leaves only.") + table_list = self.leaves + else: + table_list = self.visited + + return { + table: self._get_files(table) + for table in table_list + if any(self._get_files(table)) + } + + @property + def file_paths(self) -> List[str]: + """Return list of unique analysis files from all visited nodes. + + This covers intermediate analysis files that may not have been fetched + directly by the user. + """ + self.cascade() + unique_files = set( + [file for table in self.visited for file in self._get_files(table)] + ) + return [ + {"file_path": AnalysisNwbfile().get_abs_path(file)} + for file in unique_files + if file is not None + ] diff --git a/src/spyglass/utils/dj_helper_fn.py b/src/spyglass/utils/dj_helper_fn.py index 95eeb541f..44321e10a 100644 --- a/src/spyglass/utils/dj_helper_fn.py +++ b/src/spyglass/utils/dj_helper_fn.py @@ -11,6 +11,11 @@ from spyglass.utils.nwb_helper_fn import get_nwb_file +def unique_dicts(list_of_dict): + """Remove duplicate dictionaries from a list.""" + return [dict(t) for t in {tuple(d.items()) for d in list_of_dict}] + + def deprecated_factory(classes: list, old_module: str = "") -> list: """Creates a list of classes and logs a warning when instantiated @@ -182,8 +187,8 @@ def fetch_nwb(query_expression, nwb_master, *attrs, **kwargs): ) for file_name in nwb_files: file_path = file_path_fn(file_name) - if not os.path.exists(file_path): - # retrieve the file from kachery. This also opens the file and stores the file object + if not os.path.exists(file_path): # retrieve the file from kachery. + # This also opens the file and stores the file object get_nwb_file(file_path) rec_dicts = ( diff --git a/src/spyglass/utils/dj_merge_tables.py b/src/spyglass/utils/dj_merge_tables.py index 1e14468da..f94645ccf 100644 --- a/src/spyglass/utils/dj_merge_tables.py +++ b/src/spyglass/utils/dj_merge_tables.py @@ -806,13 +806,14 @@ def delete(self, force_permission=False, *args, **kwargs): ): part.delete(force_permission=force_permission, *args, **kwargs) - def super_delete(self, *args, **kwargs): + def super_delete(self, warn=True, *args, **kwargs): """Alias for datajoint.table.Table.delete. - Added to support MRO of SpyglassMixin""" - logger.warning("!! Using super_delete. Bypassing cautious_delete !!") - - self._log_use(start=time(), super_delete=True) + Added to support MRO of SpyglassMixin + """ + if warn: + logger.warning("!! 
Bypassing cautious_delete !!") + self._log_use(start=time(), super_delete=True) super().delete(*args, **kwargs) diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index 9377b30c2..7b42088b4 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -1,9 +1,14 @@ +from atexit import register as exit_register +from atexit import unregister as exit_unregister from collections import OrderedDict from functools import cached_property +from inspect import stack as inspect_stack +from os import environ from time import time from typing import Dict, List, Union import datajoint as dj +from datajoint.condition import make_condition from datajoint.errors import DataJointError from datajoint.expression import QueryExpression from datajoint.logging import logger as dj_logger @@ -24,6 +29,8 @@ except ImportError: pynapple = None +EXPORT_ENV_VAR = "SPYGLASS_EXPORT_ID" + class SpyglassMixin: """Mixin for Spyglass DataJoint tables. @@ -128,7 +135,32 @@ def fetch_nwb(self, *attrs, **kwargs): AnalysisNwbfile (i.e., "-> (Analysis)Nwbfile" in definition) or a _nwb_table attribute. If both are present, the attribute takes precedence. + + Additional logic support Export table logging. """ + table, tbl_attr = self._nwb_table_tuple + if self.export_id and "analysis" in tbl_attr: + tbl_pk = "analysis_file_name" + fnames = (self * table).fetch(tbl_pk) + logger.debug( + f"Export {self.export_id}: fetch_nwb {self.table_name}, {fnames}" + ) + self._export_table.File.insert( + [ + {"export_id": self.export_id, tbl_pk: fname} + for fname in fnames + ], + skip_duplicates=True, + ) + self._export_table.Table.insert1( + dict( + export_id=self.export_id, + table_name=self.full_table_name, + restriction=make_condition(self, self.restriction, set()), + ), + skip_duplicates=True, + ) + return fetch_nwb(self, self._nwb_table_tuple, *attrs, **kwargs) def fetch_pynapple(self, *attrs, **kwargs): @@ -182,6 +214,10 @@ def _merge_tables(self) -> Dict[str, dj.FreeTable]: visited = set() def search_descendants(parent): + # TODO: Add check that parents are in the graph. If not, raise error + # asking user to import the table. + # TODO: Make a `is_merge_table` helper, and check for false + # positives in the mixin init. for desc in parent.descendants(as_objects=True): if ( MERGE_PK not in desc.heading.names @@ -549,8 +585,154 @@ def delete(self, force_permission=False, *args, **kwargs): """Alias for cautious_delete, overwrites datajoint.table.Table.delete""" self.cautious_delete(force_permission=force_permission, *args, **kwargs) - def super_delete(self, *args, **kwargs): + def super_delete(self, warn=True, *args, **kwargs): """Alias for datajoint.table.Table.delete.""" - logger.warning("!! Using super_delete. Bypassing cautious_delete !!") - self._log_use(start=time(), super_delete=True) + if warn: + logger.warning("!! Bypassing cautious_delete !!") + self._log_use(start=time(), super_delete=True) super().delete(*args, **kwargs) + + # ------------------------------- Export Log ------------------------------- + + @cached_property + def _spyglass_version(self): + """Get Spyglass version from dj.config.""" + from spyglass import __version__ as sg_version + + return ".".join(sg_version.split(".")[:3]) # Major.Minor.Patch + + @cached_property + def _export_table(self): + """Lazy load export selection table.""" + from spyglass.common.common_usage import ExportSelection + + return ExportSelection() + + @property + def export_id(self): + """ID of export in progress. 
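+
+        Returns 0 if no export is currently in progress.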
+ + NOTE: User of an env variable to store export_id may not be thread safe. + Exports must be run in sequence, not parallel. + """ + + return int(environ.get(EXPORT_ENV_VAR, 0)) + + @export_id.setter + def export_id(self, value): + """Set ID of export using `table.export_id = X` notation.""" + if self.export_id != 0 and self.export_id != value: + raise RuntimeError("Export already in progress.") + environ[EXPORT_ENV_VAR] = str(value) + exit_register(self._export_id_cleanup) # End export on exit + + @export_id.deleter + def export_id(self): + """Delete ID of export using `del table.export_id` notation.""" + self._export_id_cleanup() + + def _export_id_cleanup(self): + """Cleanup export ID.""" + if environ.get(EXPORT_ENV_VAR): + del environ[EXPORT_ENV_VAR] + exit_unregister(self._export_id_cleanup) # Remove exit hook + + def _start_export(self, paper_id, analysis_id): + """Start export process.""" + if self.export_id: + logger.info(f"Export {self.export_id} in progress. Starting new.") + self._stop_export(warn=False) + + self.export_id = self._export_table.insert1_return_pk( + dict( + paper_id=paper_id, + analysis_id=analysis_id, + spyglass_version=self._spyglass_version, + ) + ) + + def _stop_export(self, warn=True): + """End export process.""" + if not self.export_id and warn: + logger.warning("Export not in progress.") + del self.export_id + + def _log_fetch(self, *args, **kwargs): + """Log fetch for export.""" + if not self.export_id or self.database == "common_usage": + return + + banned = [ + "head", # Prevents on Table().head() call + "tail", # Prevents on Table().tail() call + "preview", # Prevents on Table() call + "_repr_html_", # Prevents on Table() call in notebook + "cautious_delete", # Prevents add on permission check during delete + "get_abs_path", # Assumes that fetch_nwb will catch file/table + ] + called = [i.function for i in inspect_stack()] + if set(banned) & set(called): # if called by any in banned, return + return + + logger.debug(f"Export {self.export_id}: fetch() {self.table_name}") + + restr = self.restriction or True + limit = kwargs.get("limit") + offset = kwargs.get("offset") + if limit or offset: # Use result as restr if limit/offset + restr = self.restrict(restr).fetch( + log_fetch=False, as_dict=True, limit=limit, offset=offset + ) + + restr_str = make_condition(self, restr, set()) + + if isinstance(restr_str, str) and len(restr_str) > 2048: + raise RuntimeError( + "Export cannot handle restrictions > 2048.\n\t" + + "If required, please open an issue on GitHub.\n\t" + + f"Restriction: {restr_str}" + ) + self._export_table.Table.insert1( + dict( + export_id=self.export_id, + table_name=self.full_table_name, + restriction=restr_str, + ), + skip_duplicates=True, + ) + + def fetch(self, *args, log_fetch=True, **kwargs): + """Log fetch for export.""" + ret = super().fetch(*args, **kwargs) + if log_fetch: + self._log_fetch(*args, **kwargs) + return ret + + def fetch1(self, *args, log_fetch=True, **kwargs): + """Log fetch1 for export.""" + ret = super().fetch1(*args, **kwargs) + if log_fetch: + self._log_fetch(*args, **kwargs) + return ret + + # ------------------------- Other helper methods ------------------------- + + def _auto_increment(self, key, pk, *args, **kwargs): + """Auto-increment primary key.""" + if not key.get(pk): + key[pk] = (dj.U().aggr(self, n=f"max({pk})").fetch1("n") or 0) + 1 + return key + + def file_like(self, name=None, **kwargs): + """Convenience method for wildcard search on file name fields.""" + if not name: + return self & True + attr 
= None + for field in self.heading.names: + if "file" in field: + attr = field + break + if not attr: + logger.error(f"No file-like field found in {self.full_table_name}") + return + return self & f"{attr} LIKE '%{name}%'" From 696dee77bbaff9216f1e3f5306e1424e5429855a Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Fri, 19 Apr 2024 16:12:27 -0700 Subject: [PATCH 21/60] Spikesorting quality of life helpers (#910) * add utitlity function for finding spikesorting merge ids * add option to select v1 sorts that didn't go through artifact detection * add option to return merge keys as dicts for future restrictions * Add tool to get brain region and electrode info for a spikesorting merge id * update changelog * style cleanup * style cleanup * fix restriction bug for curation_id * account for change or radiu_um argument name in spikeinterface * only do joins with metric curastion tables if have relevant keys in the restriction * Update tutorial to use spikesorting merge table helper functions * fix spelling --- CHANGELOG.md | 1 + notebooks/10_Spike_SortingV1.ipynb | 1579 +++++++++++++++-- ...acting_Clusterless_Waveform_Features.ipynb | 1107 +++--------- notebooks/41_Decoding_Clusterless.ipynb | 247 ++- notebooks/42_Decoding_SortedSpikes.ipynb | 147 +- notebooks/py_scripts/10_Spike_SortingV1.py | 31 +- ...xtracting_Clusterless_Waveform_Features.py | 348 ++++ .../py_scripts/41_Decoding_Clusterless.py | 450 +++++ .../py_scripts/42_Decoding_SortedSpikes.py | 182 ++ .../spikesorting/spikesorting_merge.py | 117 +- .../spikesorting/v0/spikesorting_curation.py | 35 +- .../spikesorting/v0/spikesorting_sorting.py | 4 + src/spyglass/spikesorting/v1/curation.py | 43 +- src/spyglass/spikesorting/v1/sorting.py | 4 + 14 files changed, 3104 insertions(+), 1191 deletions(-) create mode 100644 notebooks/py_scripts/40_Extracting_Clusterless_Waveform_Features.py create mode 100644 notebooks/py_scripts/41_Decoding_Clusterless.py create mode 100644 notebooks/py_scripts/42_Decoding_SortedSpikes.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 05bf14719..e9ea13068 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ - Spikesorting - Update calls in v0 pipeline for spikeinterface>=0.99 #893 - Fix method type of `get_spike_times` #904 + - Add helper functions for restricting spikesorting results and linking to probe info #910 - Decoding - Handle dimensions of clusterless `get_ahead_behind_distance` #904 - Fix improper handling of nwb file names with .strip #929 diff --git a/notebooks/10_Spike_SortingV1.ipynb b/notebooks/10_Spike_SortingV1.ipynb index e650f2cbc..e4bbbeb8e 100644 --- a/notebooks/10_Spike_SortingV1.ipynb +++ b/notebooks/10_Spike_SortingV1.ipynb @@ -31,7 +31,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "5778bf96-740c-4e4b-a695-ed4385fc9b58", "metadata": { "tags": [] @@ -75,8 +75,8 @@ "name": "stderr", "output_type": "stream", "text": [ - "[2024-03-22 09:25:28,835][INFO]: Connecting sambray@lmf-db.cin.ucsf.edu:3306\n", - "[2024-03-22 09:25:28,874][INFO]: Connected sambray@lmf-db.cin.ucsf.edu:3306\n" + "[2024-04-19 10:57:17,965][INFO]: Connecting sambray@lmf-db.cin.ucsf.edu:3306\n", + "[2024-04-19 10:57:17,985][INFO]: Connected sambray@lmf-db.cin.ucsf.edu:3306\n" ] } ], @@ -237,7 +237,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "8d659323", "metadata": {}, "outputs": [], @@ -273,7 +273,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "id": "a269f6af-eb16-4551-b511-a264368c9490", 
"metadata": {}, "outputs": [], @@ -291,10 +291,26 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "id": "5b307631-3cc5-4859-9e95-aeedf6a3de56", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "{'nwb_file_name': 'minirec20230622_.nwb',\n", + " 'sort_group_id': 0,\n", + " 'preproc_param_name': 'default',\n", + " 'interval_list_name': '01_s1',\n", + " 'team_name': 'My Team',\n", + " 'recording_id': UUID('3450db49-28d5-4942-aa37-7c19126d16db')}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# define and insert a key for each sort group and interval you want to sort\n", "key = {\n", @@ -317,10 +333,107 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "id": "3840f86a-8769-423e-8aeb-4d9ab694f1ef", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[10:57:43][INFO] Spyglass: Writing new NWB file minirec20230622_PTCFX77XOI.nwb\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/hdmf/build/objectmapper.py:668: MissingRequiredBuildWarning: NWBFile 'root' is missing required value for attribute 'source_script_file_name'.\n", + " warnings.warn(msg, MissingRequiredBuildWarning)\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " Processed recording.\n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "
\n", + "

recording_id

\n", + " \n", + "
\n", + "

analysis_file_name

\n", + " name of the file\n", + "
\n", + "

object_id

\n", + " Object ID for the processed recording in NWB file\n", + "
3450db49-28d5-4942-aa37-7c19126d16dbminirec20230622_PTCFX77XOI.nwb15592178-c317-4112-bfa6-b0943542e507
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*recording_id analysis_file_ object_id \n", + "+------------+ +------------+ +------------+\n", + "3450db49-28d5- minirec2023062 15592178-c317-\n", + " (Total: 1)" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Assuming 'key' is a dictionary with fields that you want to include in 'ssr_key'\n", "ssr_key = {\n", @@ -336,7 +449,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "id": "1c6c7ea3-9538-4fa9-890b-ee16cc18af31", "metadata": {}, "outputs": [], @@ -362,10 +475,39 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "id": "74415172-f2da-4fd3-ab43-01857d682b0d", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[10:57:52][INFO] Spyglass: Using 4 jobs...\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "461383a7fb194d79b603244c4e371a98", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "detect_artifact_frames: 0%| | 0/2 [00:00\n", + " .Table{\n", + " border-collapse:collapse;\n", + " }\n", + " .Table th{\n", + " background: #A0A0A0; color: #ffffff; padding:4px; border:#f0e0e0 1px solid;\n", + " font-weight: normal; font-family: monospace; font-size: 100%;\n", + " }\n", + " .Table td{\n", + " padding:4px; border:#f0e0e0 1px solid; font-size:100%;\n", + " }\n", + " .Table tr:nth-child(odd){\n", + " background: #ffffff;\n", + " color: #000000;\n", + " }\n", + " .Table tr:nth-child(even){\n", + " background: #f3f1ff;\n", + " color: #000000;\n", + " }\n", + " /* Tooltip container */\n", + " .djtooltip {\n", + " }\n", + " /* Tooltip text */\n", + " .djtooltip .djtooltiptext {\n", + " visibility: hidden;\n", + " width: 120px;\n", + " background-color: black;\n", + " color: #fff;\n", + " text-align: center;\n", + " padding: 5px 0;\n", + " border-radius: 6px;\n", + " /* Position the tooltip text - see examples below! */\n", + " position: absolute;\n", + " z-index: 1;\n", + " }\n", + " #primary {\n", + " font-weight: bold;\n", + " color: black;\n", + " }\n", + " #nonprimary {\n", + " font-weight: normal;\n", + " color: white;\n", + " }\n", + "\n", + " /* Show the tooltip text when you mouse over the tooltip container */\n", + " .djtooltip:hover .djtooltiptext {\n", + " visibility: visible;\n", + " }\n", + " \n", + " \n", + " Detected artifacts (e.g. large transients from movement).\n", + "
\n", + " \n", + " \n", + " \n", + "
\n", + "

artifact_id

\n", + " \n", + "
0058dab4-41c1-42b1-91f4-5773f2ad36cc
01b39d37-3ff8-4907-9da6-9fec9baf87b5
035f0bae-80b3-4ce9-a767-94d336f36283
038ee778-6cf1-4e99-ab80-e354db5170c9
03e9768d-d101-4f56-abf9-5b0e3e1803b7
0490c820-c381-43b6-857e-f463147723ff
04a289c6-9e19-486a-a4cb-7e9638af225a
06dd7922-7042-4023-bebf-da1dacb0b6c7
07036486-e9f5-4dba-8662-7fb5ff2a6711
070ed448-a52d-478e-9102-0d04a6ed0b96
07a65788-bb89-48f3-90ea-4ab1add06eae
0a6611b3-c593-4900-a715-66bb1396940e
\n", + "

...

\n", + "

Total: 151

\n", + " " + ], + "text/plain": [ + "*artifact_id \n", + "+------------+\n", + "0058dab4-41c1-\n", + "01b39d37-3ff8-\n", + "035f0bae-80b3-\n", + "038ee778-6cf1-\n", + "03e9768d-d101-\n", + "0490c820-c381-\n", + "04a289c6-9e19-\n", + "06dd7922-7042-\n", + "07036486-e9f5-\n", + "070ed448-a52d-\n", + "07a65788-bb89-\n", + "0a6611b3-c593-\n", + " ...\n", + " (Total: 151)" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "sgs.ArtifactDetection()" ] @@ -419,7 +653,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "id": "34246883-9dc4-43c5-a438-009215a3a35e", "metadata": {}, "outputs": [], @@ -452,67 +686,201 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "id": "68856fb6-b5c2-4ee4-b300-43a117e453a1", "metadata": {}, - "outputs": [], - "source": [ - "sgs.SpikeSortingSelection.insert_selection(key)\n", - "sgs.SpikeSortingSelection() & key" - ] - }, - { - "cell_type": "markdown", - "id": "bb343fb7-04d6-48fc-bf67-9919769a7a52", - "metadata": {}, - "source": [ - "Once `SpikeSortingSelection` is populated, let's run `SpikeSorting.populate`. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "54ccf059-b1ae-42e8-aede-4af30a61fd2b", - "metadata": {}, - "outputs": [], - "source": [ - "sss_pk = (sgs.SpikeSortingSelection & key).proj()\n", - "\n", - "sgs.SpikeSorting.populate(sss_pk)" - ] - }, - { - "cell_type": "markdown", - "id": "f3d1e621", - "metadata": {}, - "source": [ - "The spike sorting results (spike times of detected units) are saved in an NWB file. We can access this in two ways. First, we can access it via the `fetch_nwb` method, which allows us to directly access the spike times saved in the `units` table of the NWB file. Second, we can access it as a `spikeinterface.NWBSorting` object. This allows us to take advantage of the rich APIs of `spikeinterface` to further analyze the sorting. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3d41d3ab", - "metadata": {}, - "outputs": [], - "source": [ - "sorting_nwb = (sgs.SpikeSorting & key).fetch_nwb()\n", - "sorting_si = sgs.SpikeSorting.get_sorting(key)" - ] - }, - { - "cell_type": "markdown", - "id": "db328eb1", - "metadata": {}, - "source": [ - "Note that the spike times of `fetch_nwb` is in units of seconds aligned with the timestamps of the recording. The spike times of the `spikeinterface.NWBSorting` object is in units of samples (as is generally true for sorting objects in `spikeinterface`)." - ] - }, - { - "cell_type": "markdown", - "id": "55d6c183", - "metadata": {}, - "source": [ + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " Processed recording and spike sorting parameters. Use `insert_selection` method to insert rows.\n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

sorting_id

\n", + " \n", + "
\n", + "

recording_id

\n", + " \n", + "
\n", + "

sorter

\n", + " \n", + "
\n", + "

sorter_param_name

\n", + " \n", + "
\n", + "

nwb_file_name

\n", + " name of the NWB file\n", + "
\n", + "

interval_list_name

\n", + " descriptive name of this interval list\n", + "
16cbb873-052f-44f3-9f4d-89af3544915e3450db49-28d5-4942-aa37-7c19126d16dbmountainsort4franklab_tetrode_hippocampus_30KHzminirec20230622_.nwbf03513af-bff8-4732-a6ab-e53f0550e7b0
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*sorting_id recording_id sorter sorter_param_n nwb_file_name interval_list_\n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", + "16cbb873-052f- 3450db49-28d5- mountainsort4 franklab_tetro minirec2023062 f03513af-bff8-\n", + " (Total: 1)" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sgs.SpikeSortingSelection.insert_selection(key)\n", + "sgs.SpikeSortingSelection() & key" + ] + }, + { + "cell_type": "markdown", + "id": "bb343fb7-04d6-48fc-bf67-9919769a7a52", + "metadata": {}, + "source": [ + "Once `SpikeSortingSelection` is populated, let's run `SpikeSorting.populate`. " + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "54ccf059-b1ae-42e8-aede-4af30a61fd2b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mountainsort4 use the OLD spikeextractors mapped with NewToOldRecording\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[10:58:17][INFO] Spyglass: Writing new NWB file minirec20230622_PP6Y10VW0V.nwb\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/hdmf/build/objectmapper.py:668: MissingRequiredBuildWarning: NWBFile 'root' is missing required value for attribute 'source_script_file_name'.\n", + " warnings.warn(msg, MissingRequiredBuildWarning)\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/hash.py:39: ResourceWarning: unclosed file <_io.BufferedReader name='/stelmo/nwb/analysis/minirec20230622/minirec20230622_PP6Y10VW0V.nwb'>\n", + " return uuid_from_stream(Path(filepath).open(\"rb\"), init_string=init_string)\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/external.py:276: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n", + " if check_hash:\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/tempfile.py:821: ResourceWarning: Implicitly cleaning up \n", + " _warnings.warn(warn_message, ResourceWarning)\n" + ] + } + ], + "source": [ + "sss_pk = (sgs.SpikeSortingSelection & key).proj()\n", + "\n", + "sgs.SpikeSorting.populate(sss_pk)" + ] + }, + { + "cell_type": "markdown", + "id": "f3d1e621", + "metadata": {}, + "source": [ + "The spike sorting results (spike times of detected units) are saved in an NWB file. We can access this in two ways. First, we can access it via the `fetch_nwb` method, which allows us to directly access the spike times saved in the `units` table of the NWB file. Second, we can access it as a `spikeinterface.NWBSorting` object. This allows us to take advantage of the rich APIs of `spikeinterface` to further analyze the sorting. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "3d41d3ab", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/hash.py:39: ResourceWarning: unclosed file <_io.BufferedReader name='/stelmo/nwb/analysis/minirec20230622/minirec20230622_PP6Y10VW0V.nwb'>\n", + " return uuid_from_stream(Path(filepath).open(\"rb\"), init_string=init_string)\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n" + ] + } + ], + "source": [ + "sorting_nwb = (sgs.SpikeSorting & key).fetch_nwb()\n", + "sorting_si = sgs.SpikeSorting.get_sorting(key)" + ] + }, + { + "cell_type": "markdown", + "id": "db328eb1", + "metadata": {}, + "source": [ + "Note that the spike times of `fetch_nwb` is in units of seconds aligned with the timestamps of the recording. The spike times of the `spikeinterface.NWBSorting` object is in units of samples (as is generally true for sorting objects in `spikeinterface`)." + ] + }, + { + "cell_type": "markdown", + "id": "55d6c183", + "metadata": {}, + "source": [ "## Automatic Curation" ] }, @@ -529,10 +897,41 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "id": "6245eec9-3fba-4071-b58b-eec6d9345532", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[10:58:32][INFO] Spyglass: Writing new NWB file minirec20230622_SYPH1SYT75.nwb\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/hdmf/build/objectmapper.py:668: MissingRequiredBuildWarning: NWBFile 'root' is missing required value for attribute 'source_script_file_name'.\n", + " warnings.warn(msg, MissingRequiredBuildWarning)\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/hash.py:39: ResourceWarning: unclosed file <_io.BufferedReader name='/stelmo/nwb/analysis/minirec20230622/minirec20230622_SYPH1SYT75.nwb'>\n", + " return uuid_from_stream(Path(filepath).open(\"rb\"), init_string=init_string)\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/external.py:276: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n", + " if check_hash:\n" + ] + }, + { + "data": { + "text/plain": [ + "{'sorting_id': UUID('16cbb873-052f-44f3-9f4d-89af3544915e'),\n", + " 'curation_id': 0,\n", + " 'parent_curation_id': -1,\n", + " 'analysis_file_name': 'minirec20230622_SYPH1SYT75.nwb',\n", + " 'object_id': '3e4f927b-716f-4dd8-9c98-acd132d758fb',\n", + " 'merges_applied': False,\n", + " 'description': 'testing sort'}" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "sgs.SpikeSortingRecording & key\n", "sgs.CurationV1.insert_curation(\n", @@ -545,10 +944,192 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "id": "5bec5b97-4e9f-4ee9-a6b5-4f05f4726744", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " Curation of a SpikeSorting. Use `insert_curation` to insert rows.\n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

sorting_id

\n", + " \n", + "
\n", + "

curation_id

\n", + " \n", + "
\n", + "

parent_curation_id

\n", + " \n", + "
\n", + "

analysis_file_name

\n", + " name of the file\n", + "
\n", + "

object_id

\n", + " \n", + "
\n", + "

merges_applied

\n", + " \n", + "
\n", + "

description

\n", + " \n", + "
021fb85a-992f-4360-99c7-e2da32c5b9cb0-1BS2820231107_8Z8CLG184Z.nwb37ee7365-028f-46e1-8351-1cd402a7b36c0testing sort
021fb85a-992f-4360-99c7-e2da32c5b9cb10BS2820231107_HPIQR9LZWU.nwb538032a5-5d29-4cb8-b0a2-7224fee6d8ce0after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb20BS2820231107_SVW8YK84IP.nwbed440315-7302-4217-be15-087c7efeda7e0after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb30BS2820231107_7CWR2JR68B.nwb0d8be667-2831-4e99-8c9b-54102de48e850after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb40BS2820231107_1PCRTB2UZ2.nwb9f9e9a1e-9be3-405c-9c66-4bf6dc54d4d90after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb50BS2820231107_4NPZ4YTASV.nwb89170a28-487a-4787-83dd-18009c4467000after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb60BS2820231107_MMSIJ8YQ54.nwbc9fb8c88-6449-4d9a-a40a-cd10dcdc193f0after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb70BS2820231107_LZJWQPP1YW.nwbf078e3bb-92fc-4e7f-b3a8-32936a90e0570after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb80BS2820231107_RJ7DLUKOIG.nwbc311fbfb-cd3d-4d92-b535-b5da3d4a6ec30after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb90BS2820231107_6ZJP5NRCX9.nwba54ee3f8-851a-4dca-bb46-7673e28074620after metric curation
03dc29a5-febe-4a59-ab61-21a25dea36250-1j1620210710_EOE1VZ4YAX.nwb52889e86-c249-4916-9576-a9ccf7f48dbe0
061ba57b-d2cb-4052-b375-42ba13684e410-1BS2820231107_S21IIVRCZA.nwb5d71500d-1065-4610-b3a6-746821d0f4380testing sort
\n", + "

...

\n", + "

Total: 626

\n", + " " + ], + "text/plain": [ + "*sorting_id *curation_id parent_curatio analysis_file_ object_id merges_applied description \n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", + "021fb85a-992f- 0 -1 BS2820231107_8 37ee7365-028f- 0 testing sort \n", + "021fb85a-992f- 1 0 BS2820231107_H 538032a5-5d29- 0 after metric c\n", + "021fb85a-992f- 2 0 BS2820231107_S ed440315-7302- 0 after metric c\n", + "021fb85a-992f- 3 0 BS2820231107_7 0d8be667-2831- 0 after metric c\n", + "021fb85a-992f- 4 0 BS2820231107_1 9f9e9a1e-9be3- 0 after metric c\n", + "021fb85a-992f- 5 0 BS2820231107_4 89170a28-487a- 0 after metric c\n", + "021fb85a-992f- 6 0 BS2820231107_M c9fb8c88-6449- 0 after metric c\n", + "021fb85a-992f- 7 0 BS2820231107_L f078e3bb-92fc- 0 after metric c\n", + "021fb85a-992f- 8 0 BS2820231107_R c311fbfb-cd3d- 0 after metric c\n", + "021fb85a-992f- 9 0 BS2820231107_6 a54ee3f8-851a- 0 after metric c\n", + "03dc29a5-febe- 0 -1 j1620210710_EO 52889e86-c249- 0 \n", + "061ba57b-d2cb- 0 -1 BS2820231107_S 5d71500d-1065- 0 testing sort \n", + " ...\n", + " (Total: 626)" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "sgs.CurationV1()" ] @@ -563,7 +1144,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 17, "id": "7207abda-ea84-43af-97d4-e5be3464d28d", "metadata": {}, "outputs": [], @@ -581,10 +1162,110 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 18, "id": "14c2eacc-cc45-4e61-9919-04785a721079", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " Spike sorting and parameters for metric curation. Use `insert_selection` to insert a row into this table.\n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

metric_curation_id

\n", + " \n", + "
\n", + "

sorting_id

\n", + " \n", + "
\n", + "

curation_id

\n", + " \n", + "
\n", + "

waveform_param_name

\n", + " name of waveform extraction parameters\n", + "
\n", + "

metric_param_name

\n", + " \n", + "
\n", + "

metric_curation_param_name

\n", + " \n", + "
5bd75cd5-cc2e-41dd-9056-5d62fa46021a16cbb873-052f-44f3-9f4d-89af3544915e0default_not_whitenedfranklab_defaultdefault
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*metric_curati sorting_id curation_id waveform_param metric_param_n metric_curatio\n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", + "5bd75cd5-cc2e- 16cbb873-052f- 0 default_not_wh franklab_defau default \n", + " (Total: 1)" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "sgs.MetricCurationSelection.insert_selection(key)\n", "sgs.MetricCurationSelection() & key" @@ -592,12 +1273,100 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 27, "id": "d22f5725-4fd1-42ea-a1d4-590bd1353d46", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " Results of applying curation based on quality metrics. To do additional curation, insert another row in `CurationV1`\n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "
\n", + "

metric_curation_id

\n", + " \n", + "
\n", + "

analysis_file_name

\n", + " name of the file\n", + "
\n", + "

object_id

\n", + " Object ID for the metrics in NWB file\n", + "
5bd75cd5-cc2e-41dd-9056-5d62fa46021aminirec20230622_PVSMM7XHHJ.nwb01b58a59-1b49-4bd1-a204-16fb09d67b2a
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*metric_curati analysis_file_ object_id \n", + "+------------+ +------------+ +------------+\n", + "5bd75cd5-cc2e- minirec2023062 01b58a59-1b49-\n", + " (Total: 1)" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "sgs.MetricCuration.populate()\n", + "sgs.MetricCuration.populate(key)\n", "sgs.MetricCuration() & key" ] }, @@ -611,10 +1380,41 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 28, "id": "544ba8c0-560e-471b-9eaf-5924f6051faa", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[11:08:29][INFO] Spyglass: Writing new NWB file minirec20230622_ZCMODPF1NM.nwb\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/hdmf/build/objectmapper.py:668: MissingRequiredBuildWarning: NWBFile 'root' is missing required value for attribute 'source_script_file_name'.\n", + " warnings.warn(msg, MissingRequiredBuildWarning)\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/hash.py:39: ResourceWarning: unclosed file <_io.BufferedReader name='/stelmo/nwb/analysis/minirec20230622/minirec20230622_ZCMODPF1NM.nwb'>\n", + " return uuid_from_stream(Path(filepath).open(\"rb\"), init_string=init_string)\n", + "ResourceWarning: Enable tracemalloc to get the object allocation traceback\n", + "/home/sambray/mambaforge-pypy3/envs/spyglass/lib/python3.9/site-packages/datajoint/external.py:276: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n", + " if check_hash:\n" + ] + }, + { + "data": { + "text/plain": [ + "{'sorting_id': UUID('16cbb873-052f-44f3-9f4d-89af3544915e'),\n", + " 'curation_id': 1,\n", + " 'parent_curation_id': 0,\n", + " 'analysis_file_name': 'minirec20230622_ZCMODPF1NM.nwb',\n", + " 'object_id': 'c43cd7ab-e5bd-4528-a0e5-0ca7c337a72d',\n", + " 'merges_applied': False,\n", + " 'description': 'after metric curation'}" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "key = {\n", " \"metric_curation_id\": (\n", @@ -639,10 +1439,192 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 29, "id": "f7c6bfd9-5985-41e1-bf37-8c8874b59191", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " Curation of a SpikeSorting. Use `insert_curation` to insert rows.\n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

sorting_id

\n", + " \n", + "
\n", + "

curation_id

\n", + " \n", + "
\n", + "

parent_curation_id

\n", + " \n", + "
\n", + "

analysis_file_name

\n", + " name of the file\n", + "
\n", + "

object_id

\n", + " \n", + "
\n", + "

merges_applied

\n", + " \n", + "
\n", + "

description

\n", + " \n", + "
021fb85a-992f-4360-99c7-e2da32c5b9cb0-1BS2820231107_8Z8CLG184Z.nwb37ee7365-028f-46e1-8351-1cd402a7b36c0testing sort
021fb85a-992f-4360-99c7-e2da32c5b9cb10BS2820231107_HPIQR9LZWU.nwb538032a5-5d29-4cb8-b0a2-7224fee6d8ce0after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb20BS2820231107_SVW8YK84IP.nwbed440315-7302-4217-be15-087c7efeda7e0after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb30BS2820231107_7CWR2JR68B.nwb0d8be667-2831-4e99-8c9b-54102de48e850after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb40BS2820231107_1PCRTB2UZ2.nwb9f9e9a1e-9be3-405c-9c66-4bf6dc54d4d90after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb50BS2820231107_4NPZ4YTASV.nwb89170a28-487a-4787-83dd-18009c4467000after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb60BS2820231107_MMSIJ8YQ54.nwbc9fb8c88-6449-4d9a-a40a-cd10dcdc193f0after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb70BS2820231107_LZJWQPP1YW.nwbf078e3bb-92fc-4e7f-b3a8-32936a90e0570after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb80BS2820231107_RJ7DLUKOIG.nwbc311fbfb-cd3d-4d92-b535-b5da3d4a6ec30after metric curation
021fb85a-992f-4360-99c7-e2da32c5b9cb90BS2820231107_6ZJP5NRCX9.nwba54ee3f8-851a-4dca-bb46-7673e28074620after metric curation
03dc29a5-febe-4a59-ab61-21a25dea36250-1j1620210710_EOE1VZ4YAX.nwb52889e86-c249-4916-9576-a9ccf7f48dbe0
061ba57b-d2cb-4052-b375-42ba13684e410-1BS2820231107_S21IIVRCZA.nwb5d71500d-1065-4610-b3a6-746821d0f4380testing sort
\n", + "

...

\n", + "

Total: 627

\n", + " " + ], + "text/plain": [ + "*sorting_id *curation_id parent_curatio analysis_file_ object_id merges_applied description \n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", + "021fb85a-992f- 0 -1 BS2820231107_8 37ee7365-028f- 0 testing sort \n", + "021fb85a-992f- 1 0 BS2820231107_H 538032a5-5d29- 0 after metric c\n", + "021fb85a-992f- 2 0 BS2820231107_S ed440315-7302- 0 after metric c\n", + "021fb85a-992f- 3 0 BS2820231107_7 0d8be667-2831- 0 after metric c\n", + "021fb85a-992f- 4 0 BS2820231107_1 9f9e9a1e-9be3- 0 after metric c\n", + "021fb85a-992f- 5 0 BS2820231107_4 89170a28-487a- 0 after metric c\n", + "021fb85a-992f- 6 0 BS2820231107_M c9fb8c88-6449- 0 after metric c\n", + "021fb85a-992f- 7 0 BS2820231107_L f078e3bb-92fc- 0 after metric c\n", + "021fb85a-992f- 8 0 BS2820231107_R c311fbfb-cd3d- 0 after metric c\n", + "021fb85a-992f- 9 0 BS2820231107_6 a54ee3f8-851a- 0 after metric c\n", + "03dc29a5-febe- 0 -1 j1620210710_EO 52889e86-c249- 0 \n", + "061ba57b-d2cb- 0 -1 BS2820231107_S 5d71500d-1065- 0 testing sort \n", + " ...\n", + " (Total: 627)" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "sgs.CurationV1()" ] @@ -652,7 +1634,7 @@ "id": "a627274b", "metadata": {}, "source": [ - "## Manual Curation" + "## Manual Curation (Optional)" ] }, { @@ -797,15 +1779,124 @@ "id": "9ff6aff5-7020-40d6-832f-006d66d54a7e", "metadata": {}, "source": [ - "We now insert the curated spike sorting to a `Merge` table for feeding into downstream processing pipelines.\n" + "## Downstream usage (Merge table)\n", + "\n", + "Regardless of Curation method used, to make use of spikeorting results in downstream pipelines like Decoding, we will need to insert it into the `SpikeSortingOutput` merge table. " ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 30, "id": "511ecb19-7d8d-4db6-be71-c0ed66e2b0f2", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " Output of spike sorting pipelines.\n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

merge_id

\n", + " \n", + "
\n", + "

source

\n", + " \n", + "
0001a1ab-7c2b-1085-2062-53c0338ffe22CuratedSpikeSorting
000c5d0b-1c4c-55d1-ccf6-5808f57152d3CuratedSpikeSorting
0015e01d-0dc0-ca2c-1f5c-2178fa2c7f1eCuratedSpikeSorting
001628b1-0af1-7c74-a211-0e5c158ba10fCuratedSpikeSorting
001783f0-c5da-98c2-5b2a-63f1334c0a43CuratedSpikeSorting
0020b039-6a2d-1d68-6585-4866fb7ea266CuratedSpikeSorting
002be77b-38a6-fff8-cb48-a81e20ccb51bCuratedSpikeSorting
002da11c-2d16-a6dc-0468-980674ca12b0CuratedSpikeSorting
003bf29a-fa09-05be-5cac-b7ea70a48c0cCuratedSpikeSorting
003cabf2-c471-972a-4b18-63d4ab7e1b8bCuratedSpikeSorting
004d99c6-1b2e-1696-fc85-e78ac5cc7e6bCuratedSpikeSorting
004faf9a-72cb-4416-ae13-3f85d538604fCuratedSpikeSorting
\n", + "

...

\n", + "

Total: 8684

\n", + " " + ], + "text/plain": [ + "*merge_id source \n", + "+------------+ +------------+\n", + "0001a1ab-7c2b- CuratedSpikeSo\n", + "000c5d0b-1c4c- CuratedSpikeSo\n", + "0015e01d-0dc0- CuratedSpikeSo\n", + "001628b1-0af1- CuratedSpikeSo\n", + "001783f0-c5da- CuratedSpikeSo\n", + "0020b039-6a2d- CuratedSpikeSo\n", + "002be77b-38a6- CuratedSpikeSo\n", + "002da11c-2d16- CuratedSpikeSo\n", + "003bf29a-fa09- CuratedSpikeSo\n", + "003cabf2-c471- CuratedSpikeSo\n", + "004d99c6-1b2e- CuratedSpikeSo\n", + "004faf9a-72cb- CuratedSpikeSo\n", + " ...\n", + " (Total: 8684)" + ] + }, + "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput\n", "\n", @@ -814,23 +1905,313 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 52, "id": "b20c2c9e-0c97-4669-b45d-4b1c50fd2fcc", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "*merge_id *source *sorting_id *curation_id *nwb_file_name *sort_group_id *sort_interval *preproc_param *team_name *sorter *sorter_params *artifact_remo\n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +-----------+ +--------+ +------------+ +------------+\n", + "d76584f8-0969- CurationV1 03dc29a5-febe- 0 None 0 None None None None None None \n", + "33d71671-63e5- CurationV1 090377fb-72b7- 0 None 0 None None None None None None \n", + "dfa87e8e-c5cf- CurationV1 0cf93833-6a14- 0 None 0 None None None None None None \n", + "a6cc0a23-7e29- CurationV1 110e27f6-5ffa- 0 None 0 None None None None None None \n", + "7f8841a6-5e27- CurationV1 16cbb873-052f- 1 None 0 None None None None None None \n", + "91e8e8d8-1568- CurationV1 21bea0ea-3084- 0 None 0 None None None None None None \n", + "218c17c7-8a4c- CurationV1 21bea0ea-3084- 1 None 0 None None None None None None \n", + "25823222-85ed- CurationV1 2484ee5d-0819- 0 None 0 None None None None None None \n", + "5ae79d97-6a99- CurationV1 3046a016-1613- 0 None 0 None None None None None None \n", + "869072e1-76d6- CurationV1 41a13836-e128- 0 None 0 None None None None None None \n", + "a0771d6c-fc9d- CurationV1 4bc61e94-5bf9- 0 None 0 None None None None None None \n", + "ed70dacb-a637- CurationV1 5d15f94e-d53d- 0 None 0 None None None None None None \n", + " ...\n", + " (Total: 0)\n", + "\n" + ] + } + ], "source": [ - "SpikeSortingOutput.insert([key], part_name=\"CurationV1\")\n", + "# insert the automatic curation spikesorting results\n", + "curation_key = sss_pk.fetch1(\"KEY\")\n", + "curation_key[\"curation_id\"] = 1\n", + "merge_insert_key = (sgs.CurationV1 & curation_key).fetch(\"KEY\", as_dict=True)\n", + "SpikeSortingOutput.insert(merge_insert_key, part_name=\"CurationV1\")\n", "SpikeSortingOutput.merge_view()" ] }, + { + "cell_type": "markdown", + "id": "a8ab3ed2", + "metadata": {}, + "source": [ + "Finding the merge id's corresponding to an interpretable restriction such as `merge_id` or `interval_list` can require several join steps with upstream tables. 
To simplify this process we can use the included helper function `SpikeSortingOutput().get_restricted_merge_ids()` to perform the necessary joins and return the matching merge id's" + ] + }, { "cell_type": "code", - "execution_count": null, - "id": "184c3401-8df3-46f0-9dd0-c9fa98395c34", + "execution_count": 6, + "id": "3925b5de", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[13:34:12][WARNING] Spyglass: V0 requires artifact restrict. Ignoring \"restrict_by_artifact\" flag.\n" + ] + }, + { + "data": { + "text/plain": [ + "[{'merge_id': UUID('74c006e8-dcfe-e994-7b40-73f8d9f75b85')}]" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "selection_key = {\n", + " \"nwb_file_name\": nwb_file_name2,\n", + " \"sorter\": \"mountainsort4\",\n", + " \"interval_list_name\": \"01_s1\",\n", + " \"curation_id\": 0,\n", + "} # this function can use restrictions from throughout the spikesorting pipeline\n", + "spikesorting_merge_ids = SpikeSortingOutput().get_restricted_merge_ids(\n", + " selection_key, as_dict=True\n", + ")\n", + "spikesorting_merge_ids" + ] + }, + { + "cell_type": "markdown", + "id": "007fbb60", + "metadata": {}, + "source": [ + "With the spikesorting merge_ids we want we can also use the method `get_sort_group_info` to get a table linking the merge id to the electrode group it is sourced from. This can be helpful for restricting to just electrodes from a brain area of interest" + ] + }, + { + "cell_type": "code", + "execution_count": 60, + "id": "696345db", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "

merge_id

\n", + " \n", + "
\n", + "

nwb_file_name

\n", + " name of the NWB file\n", + "
\n", + "

electrode_group_name

\n", + " electrode group name from NWBFile\n", + "
\n", + "

electrode_id

\n", + " the unique number for this electrode\n", + "
\n", + "

curation_id

\n", + " a number correponding to the index of this curation\n", + "
\n", + "

sort_group_id

\n", + " identifier for a group of electrodes\n", + "
\n", + "

sort_interval_name

\n", + " name for this interval\n", + "
\n", + "

preproc_params_name

\n", + " \n", + "
\n", + "

team_name

\n", + " \n", + "
\n", + "

sorter

\n", + " \n", + "
\n", + "

sorter_params_name

\n", + " \n", + "
\n", + "

artifact_removed_interval_list_name

\n", + " \n", + "
\n", + "

region_id

\n", + " \n", + "
\n", + "

probe_id

\n", + " \n", + "
\n", + "

probe_shank

\n", + " shank number within probe\n", + "
\n", + "

probe_electrode

\n", + " electrode\n", + "
\n", + "

name

\n", + " unique label for each contact\n", + "
\n", + "

original_reference_electrode

\n", + " the configured reference electrode for this electrode\n", + "
\n", + "

x

\n", + " the x coordinate of the electrode position in the brain\n", + "
\n", + "

y

\n", + " the y coordinate of the electrode position in the brain\n", + "
\n", + "

z

\n", + " the z coordinate of the electrode position in the brain\n", + "
\n", + "

filtering

\n", + " description of the signal filtering\n", + "
\n", + "

impedance

\n", + " electrode impedance\n", + "
\n", + "

bad_channel

\n", + " if electrode is \"good\" or \"bad\" as observed during recording\n", + "
\n", + "

x_warped

\n", + " x coordinate of electrode position warped to common template brain\n", + "
\n", + "

y_warped

\n", + " y coordinate of electrode position warped to common template brain\n", + "
\n", + "

z_warped

\n", + " z coordinate of electrode position warped to common template brain\n", + "
\n", + "

contacts

\n", + " label of electrode contacts used for a bipolar signal - current workaround\n", + "
\n", + "

analysis_file_name

\n", + " name of the file\n", + "
\n", + "

units_object_id

\n", + " \n", + "
\n", + "

region_name

\n", + " the name of the brain region\n", + "
\n", + "

subregion_name

\n", + " subregion name\n", + "
\n", + "

subsubregion_name

\n", + " subregion within subregion\n", + "
662f3e35-c81e-546c-69c3-b3a2f5ed2776minirec20230622_.nwb001001_s1_first9default_hippocampusMy Teammountainsort4hippocampus_tutorialminirec20230622_.nwb_01_s1_first9_0_default_hippocampus_none_artifact_removed_valid_times35tetrode_12.500000.00.00.0None0.0False0.00.00.0minirec20230622_RXRSAFCGVJ.nwbcorpus callosum and associated subcortical white matter (cc-ec-cing-dwm)NoneNone
\n", + " \n", + "

Total: 1

\n", + " " + ], + "text/plain": [ + "*merge_id *nwb_file_name *electrode_gro *electrode_id *curation_id *sort_group_id *sort_interval *preproc_param *team_name *sorter *sorter_params *artifact_remo *region_id probe_id probe_shank probe_electrod name original_refer x y z filtering impedance bad_channel x_warped y_warped z_warped contacts analysis_file_ units_object_i region_name subregion_name subsubregion_n\n", + "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +-----------+ +------------+ +------------+ +------------+ +-----------+ +------------+ +------------+ +------------+ +------+ +------------+ +-----+ +-----+ +-----+ +-----------+ +-----------+ +------------+ +----------+ +----------+ +----------+ +----------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", + "662f3e35-c81e- minirec2023062 0 0 1 0 01_s1_first9 default_hippoc My Team mountainsort4 hippocampus_tu minirec2023062 35 tetrode_12.5 0 0 0 0 0.0 0.0 0.0 None 0.0 False 0.0 0.0 0.0 minirec2023062 corpus callosu None None \n", + " (Total: 1)" + ] + }, + "execution_count": 60, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "SpikeSortingOutput.CurationV1()" + "merge_keys = [{\"merge_id\": str(id)} for id in spikesorting_merge_ids]\n", + "SpikeSortingOutput().get_sort_group_info(merge_keys)" ] } ], diff --git a/notebooks/40_Extracting_Clusterless_Waveform_Features.ipynb b/notebooks/40_Extracting_Clusterless_Waveform_Features.ipynb index 820da6496..07b3130a5 100644 --- a/notebooks/40_Extracting_Clusterless_Waveform_Features.ipynb +++ b/notebooks/40_Extracting_Clusterless_Waveform_Features.ipynb @@ -57,9 +57,9 @@ "name": "stderr", "output_type": "stream", "text": [ - "[2024-01-17 22:14:51,194][INFO]: Connecting root@localhost:3306\n", - "[2024-01-17 22:14:51,274][INFO]: Connected root@localhost:3306\n", - "/Users/edeno/Documents/GitHub/spyglass/src/spyglass/data_import/insert_sessions.py:58: UserWarning: Cannot insert data from mediumnwb20230802.nwb: mediumnwb20230802_.nwb is already in Nwbfile table.\n", + "[2024-04-19 10:37:45,302][INFO]: Connecting sambray@lmf-db.cin.ucsf.edu:3306\n", + "[2024-04-19 10:37:45,330][INFO]: Connected sambray@lmf-db.cin.ucsf.edu:3306\n", + "/home/sambray/Documents/spyglass/src/spyglass/data_import/insert_sessions.py:58: UserWarning: Cannot insert data from mediumnwb20230802.nwb: mediumnwb20230802_.nwb is already in Nwbfile table.\n", " warnings.warn(\n" ] } @@ -111,30 +111,30 @@ "name": "stderr", "output_type": "stream", "text": [ - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) 
already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:55][WARNING] Spyglass: Similar row(s) already inserted.\n" + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:53][WARNING] Spyglass: Similar row(s) already inserted.\n" ] } ], @@ -178,30 +178,39 @@ "name": "stderr", "output_type": "stream", "text": [ - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] 
Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][WARNING] Spyglass: Similar row(s) already inserted.\n" + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n", + "[10:37:56][WARNING] Spyglass: Similar row(s) already inserted.\n" ] } ], @@ -234,34 +243,72 @@ "execution_count": 5, "metadata": {}, "outputs": [ + { + "data": { + "text/plain": [ + "{'sorter': 'clusterless_thresholder',\n", + " 'sorter_param_name': 'default_clusterless',\n", + " 'sorter_params': {'detect_threshold': 100.0,\n", + " 'method': 'locally_exclusive',\n", + " 'peak_sign': 'neg',\n", + " 'exclude_sweep_ms': 0.1,\n", + " 'local_radius_um': 100,\n", + " 'noise_levels': array([1.]),\n", + " 'random_chunk_kwargs': {},\n", + " 'outputs': 'sorting'}}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "(sgs.SpikeSorterParameters() & {\"sorter\": \"clusterless_thresholder\"}).fetch1()" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[12:03:38][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:38][INFO] Spyglass: Similar row(s) already inserted.\n" + ] + 
}, { "name": "stderr", "output_type": "stream", "text": [ - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n", - "[22:14:56][INFO] Spyglass: Similar row(s) already inserted.\n" + "[12:03:38][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:38][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:38][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:38][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:38][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:38][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:38][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:38][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n", + "[12:03:39][INFO] Spyglass: Similar row(s) already inserted.\n" ] } ], @@ -275,14 +322,15 @@ " \"nwb_file_name\": nwb_copy_file_name,\n", " \"interval_list_name\": str(\n", " (\n", - " sgs.ArtifactDetectionSelection & {\"recording_id\": recording_id}\n", + " 
sgs.ArtifactDetectionSelection\n", + " & {\"recording_id\": recording_id, \"artifact_param_name\": \"none\"}\n", " ).fetch1(\"artifact_id\")\n", " ),\n", " }\n", " group_keys.append(key)\n", " sgs.SpikeSortingSelection.insert_selection(key)\n", - "\n", - "sgs.SpikeSorting.populate(group_keys)" + "sort_keys = (sgs.SpikeSortingSelection & group_keys).fetch(\"KEY\")\n", + "sgs.SpikeSorting.populate(sort_keys)" ] }, { @@ -294,14 +342,63 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b9e53383606f4cbebdb5f8ccf1b56878", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "detect peaks using locally_exclusive: 0%| | 0/1476 [00:00features_param_name

\n", " a name for this set of parameters\n", " \n", - " 0751a1e1-a406-7f87-ae6f-ce4ffc60621c\n", - "amplitude485a4ddf-332d-35b5-3ad4-0561736c1844\n", - "amplitude4a712103-c223-864f-82e0-6c23de79cc14\n", - "amplitude4a72c253-b3ca-8c13-e615-736a7ebff35c\n", - "amplitude5c53bd33-d57c-fbba-e0fb-55e0bcb85d03\n", - "amplitude614d796c-0b95-6364-aaa0-b6cb1e7bbb83\n", - "amplitude6acb99b8-6a0c-eb83-1141-5f603c5895e0\n", - "amplitude6d039a63-17ad-0b78-4b1e-f02d5f3dbbc5\n", - "amplitude74e10781-1228-4075-0870-af224024ffdc\n", - "amplitude7e3fa66e-727e-1541-819a-b01309bb30ae\n", - "amplitude86897349-ff68-ac72-02eb-739dd88936e6\n", - "amplitude8bbddc0f-d6ae-6260-9400-f884a6e25ae8\n", + " 003bf29a-fa09-05be-5cac-b7ea70a48c0c\n", + "amplitude004faf9a-72cb-4416-ae13-3f85d538604f\n", + "amplitude0061a9df-3954-99d4-d738-fd13ab7119fe\n", + "amplitude00775472-67a6-5836-b68a-d15186ae3b3c\n", + "amplitude00a1861f-bbf0-5e78-3dc6-36551b2657b0\n", + "amplitude00a9f0d0-b682-2b12-6a2b-08e4129291ce\n", + "amplitude00bd2bbf-ccdb-7be3-f1a0-5e337d87a5a4\n", + "amplitude00bdda4f-7059-6c72-c571-a80ad323fda2\n", + "amplitude00db0baa-4ec0-3d20-897a-ea4a067ebbba\n", + "amplitude00e5a16b-c4f2-e8dc-3083-17f542dadc36\n", + "amplitude00f25c1b-d5a3-6ca6-c501-ef0e544f6284\n", + "amplitude012fdb25-bd7e-aedd-c41b-bb7e177ceeb8\n", "amplitude \n", " \n", "

...

\n", - "

Total: 23

\n", + "

Total: 3504

\n", " " ], "text/plain": [ "*spikesorting_ *features_para\n", "+------------+ +------------+\n", - "0751a1e1-a406- amplitude \n", - "485a4ddf-332d- amplitude \n", - "4a712103-c223- amplitude \n", - "4a72c253-b3ca- amplitude \n", - "5c53bd33-d57c- amplitude \n", - "614d796c-0b95- amplitude \n", - "6acb99b8-6a0c- amplitude \n", - "6d039a63-17ad- amplitude \n", - "74e10781-1228- amplitude \n", - "7e3fa66e-727e- amplitude \n", - "86897349-ff68- amplitude \n", - "8bbddc0f-d6ae- amplitude \n", + "003bf29a-fa09- amplitude \n", + "004faf9a-72cb- amplitude \n", + "0061a9df-3954- amplitude \n", + "00775472-67a6- amplitude \n", + "00a1861f-bbf0- amplitude \n", + "00a9f0d0-b682- amplitude \n", + "00bd2bbf-ccdb- amplitude \n", + "00bdda4f-7059- amplitude \n", + "00db0baa-4ec0- amplitude \n", + "00e5a16b-c4f2- amplitude \n", + "00f25c1b-d5a3- amplitude \n", + "012fdb25-bd7e- amplitude \n", " ...\n", - " (Total: 23)" + " (Total: 3504)" ] }, - "execution_count": 8, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } @@ -619,59 +726,38 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "First we find the units we need:\n" + "First we find the units we need. We can use the method `SpikeSortingOutput.get_restricted_merge_ids()` to perform the needed joins to find them:\n" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([UUID('485a4ddf-332d-35b5-3ad4-0561736c1844'),\n", - " UUID('6acb99b8-6a0c-eb83-1141-5f603c5895e0'),\n", - " UUID('f7237e18-4e73-4aee-805b-90735e9147de'),\n", - " UUID('7e3fa66e-727e-1541-819a-b01309bb30ae'),\n", - " UUID('6d039a63-17ad-0b78-4b1e-f02d5f3dbbc5'),\n", - " UUID('e0e9133a-7a4e-1321-a43a-e8afcb2f25da'),\n", - " UUID('9959b614-2318-f597-6651-a3a82124d28a'),\n", - " UUID('c0eb6455-fc41-c200-b62e-e3ca81b9a3f7'),\n", - " UUID('912e250e-56d8-ee33-4525-c844d810971b'),\n", - " UUID('d7d2c97a-0e6e-d1b8-735c-d55dc66a30e1'),\n", - " UUID('abb92dce-4410-8f17-a501-a4104bda0dcf'),\n", - " UUID('74e10781-1228-4075-0870-af224024ffdc'),\n", - " UUID('8bbddc0f-d6ae-6260-9400-f884a6e25ae8'),\n", - " UUID('614d796c-0b95-6364-aaa0-b6cb1e7bbb83'),\n", - " UUID('b332482b-e430-169d-8ac0-0a73ce968ed7'),\n", - " UUID('86897349-ff68-ac72-02eb-739dd88936e6'),\n", - " UUID('4a712103-c223-864f-82e0-6c23de79cc14'),\n", - " UUID('cf858380-e8a3-49de-c2a9-1a277e307a68'),\n", - " UUID('cc4ee561-f974-f8e5-0ea4-83185263ac67'),\n", - " UUID('4a72c253-b3ca-8c13-e615-736a7ebff35c'),\n", - " UUID('b92a94d8-ee1e-2097-a81f-5c1e1556ed24'),\n", - " UUID('5c53bd33-d57c-fbba-e0fb-55e0bcb85d03'),\n", - " UUID('0751a1e1-a406-7f87-ae6f-ce4ffc60621c')], dtype=object)" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput\n", - "\n", - "merge_ids = (\n", - " (SpikeSortingOutput.CurationV1 * sgs.SpikeSortingSelection)\n", - " & {\n", + "nwb_copy_file_name = \"mediumnwb20230802_.nwb\"\n", + "from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "key = {\n", + " \"nwb_file_name\": nwb_copy_file_name,\n", + " \"sorter\": \"clusterless_thresholder\",\n", + " \"sorter_param_name\": \"default_clusterless\",\n", + "}\n", + "merge_ids = SpikeSortingOutput().get_restricted_merge_ids(\n", + " {\n", " \"nwb_file_name\": nwb_copy_file_name,\n", " 
\"sorter\": \"clusterless_thresholder\",\n", " \"sorter_param_name\": \"default_clusterless\",\n", - " }\n", - ").fetch(\"merge_id\")\n", - "merge_ids" + " },\n", + " sources=[\"v1\"],\n", + ")" ] }, { @@ -683,7 +769,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -751,44 +837,44 @@ "

features_param_name

\n", " a name for this set of parameters\n", " \n", - " 0751a1e1-a406-7f87-ae6f-ce4ffc60621c\n", - "amplitude485a4ddf-332d-35b5-3ad4-0561736c1844\n", - "amplitude4a712103-c223-864f-82e0-6c23de79cc14\n", - "amplitude4a72c253-b3ca-8c13-e615-736a7ebff35c\n", - "amplitude5c53bd33-d57c-fbba-e0fb-55e0bcb85d03\n", - "amplitude614d796c-0b95-6364-aaa0-b6cb1e7bbb83\n", - "amplitude6acb99b8-6a0c-eb83-1141-5f603c5895e0\n", - "amplitude6d039a63-17ad-0b78-4b1e-f02d5f3dbbc5\n", - "amplitude74e10781-1228-4075-0870-af224024ffdc\n", - "amplitude7e3fa66e-727e-1541-819a-b01309bb30ae\n", - "amplitude86897349-ff68-ac72-02eb-739dd88936e6\n", - "amplitude8bbddc0f-d6ae-6260-9400-f884a6e25ae8\n", + " 0233e49a-b849-7eab-7434-9c298eea87b8\n", + "amplitude07239cea-7578-5409-692c-18c9d26b4d36\n", + "amplitude08be9775-370d-6492-0b4e-a5db4ce7a128\n", + "amplitude11819f33-11d5-f0f8-2590-ce3d60b76f3a\n", + "amplitude1c2ea289-2e7f-dcda-0464-ce97d3d6a392\n", + "amplitude20f24092-d191-0c58-55c8-d43d453f9fd4\n", + "amplitude2598b48e-49a0-3389-dd15-0230e8d326e4\n", + "amplitude483055a5-9775-27b7-856e-01543bd920aa\n", + "amplitude50ae3f7e-65a8-5fc2-5304-ab534b90fa46\n", + "amplitude50b29d01-2d74-e37e-2842-ad56d833c5f9\n", + "amplitude5e756e76-68be-21b7-7764-cb78d9aa4ef8\n", + "amplitude67f156e1-5da7-9c89-03b1-cc2dba88dacd\n", "amplitude \n", " \n", "

...

\n", - "

Total: 23

\n", + "

Total: 26

\n", " " ], "text/plain": [ "*spikesorting_ *features_para\n", "+------------+ +------------+\n", - "0751a1e1-a406- amplitude \n", - "485a4ddf-332d- amplitude \n", - "4a712103-c223- amplitude \n", - "4a72c253-b3ca- amplitude \n", - "5c53bd33-d57c- amplitude \n", - "614d796c-0b95- amplitude \n", - "6acb99b8-6a0c- amplitude \n", - "6d039a63-17ad- amplitude \n", - "74e10781-1228- amplitude \n", - "7e3fa66e-727e- amplitude \n", - "86897349-ff68- amplitude \n", - "8bbddc0f-d6ae- amplitude \n", + "0233e49a-b849- amplitude \n", + "07239cea-7578- amplitude \n", + "08be9775-370d- amplitude \n", + "11819f33-11d5- amplitude \n", + "1c2ea289-2e7f- amplitude \n", + "20f24092-d191- amplitude \n", + "2598b48e-49a0- amplitude \n", + "483055a5-9775- amplitude \n", + "50ae3f7e-65a8- amplitude \n", + "50b29d01-2d74- amplitude \n", + "5e756e76-68be- amplitude \n", + "67f156e1-5da7- amplitude \n", " ...\n", - " (Total: 23)" + " (Total: 26)" ] }, - "execution_count": 10, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -815,660 +901,9 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 13, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/edeno/miniconda3/envs/spyglass/lib/python3.9/site-packages/spikeinterface/core/waveform_extractor.py:275: UserWarning: Sorting object is not dumpable, which might result in downstream errors for parallel processing. To make the sorting dumpable, use the `sorting.save()` function.\n", - " warn(\n" - ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "c4f79735339147cf93143b0d329f7b0c", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "extract waveforms memmap: 0%| | 0/2 [00:00= 0.1.0\n" - ] - }, { "data": { "text/html": [ @@ -123,15 +114,18 @@ "
\n", " \n", " \n", - " \n", - "\n", + " \n", "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", - "\n", + "\n", "
\n", - "

sorting_id

\n", - " \n", - "
\n", "

merge_id

\n", " \n", "
\n", "

features_param_name

\n", " a name for this set of parameters\n", "
\n", + "

sorting_id

\n", + " \n", + "
\n", + "

curation_id

\n", + " \n", + "
\n", "

recording_id

\n", " \n", "
\n", @@ -146,132 +140,129 @@ "
\n", "

interval_list_name

\n", " descriptive name of this interval list\n", - "
\n", - "

curation_id

\n", - " \n", "
08a302b6-5505-40fa-b4d5-62162f8eef58485a4ddf-332d-35b5-3ad4-0561736c1844
0233e49a-b849-7eab-7434-9c298eea87b8amplitude449b64e3-db0b-437e-a1b9-0d29928aa2dd85cb4efd-5dd9-4637-8c47-50927da56ecb0d6ec337b-f131-47fa-8d04-f152459539abclusterless_thresholderdefault_clusterlessmediumnwb20230802_.nwb45f6b9a1-eef3-46eb-866d-d0999afebda60
0ca508ee-af4c-4a89-8181-d48bd209bfd46acb99b8-6a0c-eb83-1141-5f603c5895e0d4d3d806-13dc-42b9-a149-267fa170aa8f
07239cea-7578-5409-692c-18c9d26b4d36amplitude328da21c-1d9c-41e2-9800-76b3484b707b17abb5a3-cc9a-4a7f-8fbf-ae3bcffad23909b34c86e-f2d0-4c6c-a7b8-302ef30b0fffclusterless_thresholderdefault_clusterlessmediumnwb20230802_.nwb686d9951-1c0f-4d5e-9f5c-09e6fd8bdd4c0
209dc048-6fae-4315-b293-c06fff29f947f7237e18-4e73-4aee-805b-90735e9147de24608f0d-ffca-4f56-8dd3-a274b7248b63
08be9775-370d-6492-0b4e-a5db4ce7a128amplitudeaff78f2f-2ba0-412a-95cc-447c3a2f46832056130f-b8c9-46d1-9c27-4287d237f63f0e9ea1b3c-6e7b-4960-a593-0dd6d5ab0990clusterless_thresholderdefault_clusterlessmediumnwb20230802_.nwb719e8a86-fcf1-4ffc-8c1f-ea912f67ad5d0
21a9a593-f6f3-4b82-99d7-8fc46556eff37e3fa66e-727e-1541-819a-b01309bb30aec96e245d-efef-4ab6-b549-683270857dbb
11819f33-11d5-f0f8-2590-ce3d60b76f3aamplitude2402805a-04f9-4a88-9ccf-071376c8de1971add870-7efe-4e64-b5fc-079c7b6d4a8a08f4b5933-7f9d-4ca1-a262-9a7978630101clusterless_thresholderdefault_clusterlessmediumnwb20230802_.nwbd581b117-160e-4311-b096-7781a4de43940
406a20e3-5a9f-4fec-b046-a6561f72461e6d039a63-17ad-0b78-4b1e-f02d5f3dbbc59d5a025a-2b46-47b3-94f4-70d58db68e60
1c2ea289-2e7f-dcda-0464-ce97d3d6a392amplitudef1427e00-2974-4301-b2ac-b4dc29277c5146b8a445-1513-44ce-8a14-d1c9dec80d7400d247564-2302-4ace-9157-c3891eceaf2cclusterless_thresholderdefault_clusterlessmediumnwb20230802_.nwb0e848c38-9105-4ea4-b6ba-dbdd5b46a0880
4131c51b-c56d-41fa-b046-46635fc17fd9e0e9133a-7a4e-1321-a43a-e8afcb2f25da56cbb21e-8fe8-4f4a-b2b0-537ad6039543
20f24092-d191-0c58-55c8-d43d453f9fd4amplitude9e332d82-1daf-4e92-bb50-12e4f9430875aec60cb7-017c-42ed-91be-0fb2a5f759480747f4eea-6df3-422b-941e-b5aaad7ec607clusterless_thresholderdefault_clusterlessmediumnwb20230802_.nwb9ed11db5-c42e-491a-8caf-7d9a37a65f130
4c5a629a-71d9-481d-ab11-a4cb0fc160879959b614-2318-f597-6651-a3a82124d28a65009b63-5830-45b5-9954-cd5341aa8cef
2598b48e-49a0-3389-dd15-0230e8d326e4amplitude3a2c3eed-413a-452a-83c8-0e4648141bdee26863d0-7a77-455c-b687-0af1bd626486034ea9dd3-b728-4bd3-872c-7a4e37fb2ac9clusterless_thresholderdefault_clusterlessmediumnwb20230802_.nwb2b9fbf14-74a0-4294-a805-26702340aac90
4d629c07-1931-4e1f-a3a8-cbf1b72161e3c0eb6455-fc41-c200-b62e-e3ca81b9a3f7e4daaf56-e40d-41d3-8523-097237d98bbd
483055a5-9775-27b7-856e-01543bd920aaamplitudef07bc0b0-de6b-4424-8ef9-766213aaca269af6681f-2e37-496e-823e-7acbdd436a27073c9e01c-b37c-41a2-8571-0df13c32bf76clusterless_thresholderdefault_clusterlessmediumnwb20230802_.nwb5c68f0f0-f577-4905-8a09-e4d171d0a22d0
554a9a3c-0461-48be-8435-123eed59c228912e250e-56d8-ee33-4525-c844d810971b3da02b84-1a7f-4f2a-81bf-2e92c4d88e96
50ae3f7e-65a8-5fc2-5304-ab534b90fa46amplitude7f128981-6868-4976-ba20-248655dcac212483d0c7-4cfe-4d6f-8dd6-2e13a8289d94003cc7709-66e7-47ac-a3bd-63add028d9f8clusterless_thresholderdefault_clusterlessmediumnwb20230802_.nwbf4b9301f-bc91-455b-9474-c801093f38560
7bb007f2-26d3-463f-b7dc-7bd4d271725ed7d2c97a-0e6e-d1b8-735c-d55dc66a30e18cfc1ccb-8de3-4eee-9e18-f8b8f5c45821
50b29d01-2d74-e37e-2842-ad56d833c5f9amplitudea9b7cec0-1256-49cf-abf0-8c45fd1553791dcecaac-8e0d-4d18-8296-cdb50eef95060d8a8c564-13c7-4fab-9a33-1eac416869daclusterless_thresholderdefault_clusterlessmediumnwb20230802_.nwb74270cba-36ee-4afb-ab50-2a6cc948e68c0
80e1f37f-48a7-4087-bd37-7a37b6a2c160abb92dce-4410-8f17-a501-a4104bda0dcf96678676-89dd-42e4-89f6-ce56c618ce83
5e756e76-68be-21b7-7764-cb78d9aa4ef8amplitude3c40ebdc-0b61-4105-9971-e1348bd49bc7552176ab-d870-41c4-8621-07e71f6e9a190fa4faf43-e747-43ca-b8a5-53a02d7938ecclusterless_thresholderdefault_clusterlessmediumnwb20230802_.nwb0f91197e-bebb-4dc6-ad41-5bf89c3eed280
8848c4a8-a2f2-4f3d-82cd-51b13b8bae3c74e10781-1228-4075-0870-af224024ffdc07036486-e9f5-4dba-8662-7fb5ff2a6711
67f156e1-5da7-9c89-03b1-cc2dba88dacdamplitude257c077b-8f3b-4abb-a631-6b8084d6a1ea8f45b210-c8f9-4a27-96c2-9b85f16b3451030895f0f-1eec-481d-b763-edae7667ef00clusterless_thresholderdefault_clusterlessmediumnwb20230802_.nwbe289e03d-32ad-461a-a1cc-c885373431490
22fb2b64-fc3c-44af-a8c1-dacc9010beab
\n", "

...

\n", - "

Total: 23

\n", + "

Total: 26

\n", " " ], "text/plain": [ - "*sorting_id *merge_id *features_para recording_id sorter sorter_param_n nwb_file_name interval_list_ curation_id \n", + "*merge_id *features_para *sorting_id curation_id recording_id sorter sorter_param_n nwb_file_name interval_list_\n", "+------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+ +------------+\n", - "08a302b6-5505- 485a4ddf-332d- amplitude 449b64e3-db0b- clusterless_th default_cluste mediumnwb20230 45f6b9a1-eef3- 0 \n", - "0ca508ee-af4c- 6acb99b8-6a0c- amplitude 328da21c-1d9c- clusterless_th default_cluste mediumnwb20230 686d9951-1c0f- 0 \n", - "209dc048-6fae- f7237e18-4e73- amplitude aff78f2f-2ba0- clusterless_th default_cluste mediumnwb20230 719e8a86-fcf1- 0 \n", - "21a9a593-f6f3- 7e3fa66e-727e- amplitude 2402805a-04f9- clusterless_th default_cluste mediumnwb20230 d581b117-160e- 0 \n", - "406a20e3-5a9f- 6d039a63-17ad- amplitude f1427e00-2974- clusterless_th default_cluste mediumnwb20230 0e848c38-9105- 0 \n", - "4131c51b-c56d- e0e9133a-7a4e- amplitude 9e332d82-1daf- clusterless_th default_cluste mediumnwb20230 9ed11db5-c42e- 0 \n", - "4c5a629a-71d9- 9959b614-2318- amplitude 3a2c3eed-413a- clusterless_th default_cluste mediumnwb20230 2b9fbf14-74a0- 0 \n", - "4d629c07-1931- c0eb6455-fc41- amplitude f07bc0b0-de6b- clusterless_th default_cluste mediumnwb20230 5c68f0f0-f577- 0 \n", - "554a9a3c-0461- 912e250e-56d8- amplitude 7f128981-6868- clusterless_th default_cluste mediumnwb20230 f4b9301f-bc91- 0 \n", - "7bb007f2-26d3- d7d2c97a-0e6e- amplitude a9b7cec0-1256- clusterless_th default_cluste mediumnwb20230 74270cba-36ee- 0 \n", - "80e1f37f-48a7- abb92dce-4410- amplitude 3c40ebdc-0b61- clusterless_th default_cluste mediumnwb20230 0f91197e-bebb- 0 \n", - "8848c4a8-a2f2- 74e10781-1228- amplitude 257c077b-8f3b- clusterless_th default_cluste mediumnwb20230 e289e03d-32ad- 0 \n", + "0233e49a-b849- amplitude 85cb4efd-5dd9- 0 d6ec337b-f131- clusterless_th default_cluste mediumnwb20230 d4d3d806-13dc-\n", + "07239cea-7578- amplitude 17abb5a3-cc9a- 0 9b34c86e-f2d0- clusterless_th default_cluste mediumnwb20230 24608f0d-ffca-\n", + "08be9775-370d- amplitude 2056130f-b8c9- 0 e9ea1b3c-6e7b- clusterless_th default_cluste mediumnwb20230 c96e245d-efef-\n", + "11819f33-11d5- amplitude 71add870-7efe- 0 8f4b5933-7f9d- clusterless_th default_cluste mediumnwb20230 9d5a025a-2b46-\n", + "1c2ea289-2e7f- amplitude 46b8a445-1513- 0 0d247564-2302- clusterless_th default_cluste mediumnwb20230 56cbb21e-8fe8-\n", + "20f24092-d191- amplitude aec60cb7-017c- 0 747f4eea-6df3- clusterless_th default_cluste mediumnwb20230 65009b63-5830-\n", + "2598b48e-49a0- amplitude e26863d0-7a77- 0 34ea9dd3-b728- clusterless_th default_cluste mediumnwb20230 e4daaf56-e40d-\n", + "483055a5-9775- amplitude 9af6681f-2e37- 0 73c9e01c-b37c- clusterless_th default_cluste mediumnwb20230 3da02b84-1a7f-\n", + "50ae3f7e-65a8- amplitude 2483d0c7-4cfe- 0 03cc7709-66e7- clusterless_th default_cluste mediumnwb20230 8cfc1ccb-8de3-\n", + "50b29d01-2d74- amplitude 1dcecaac-8e0d- 0 d8a8c564-13c7- clusterless_th default_cluste mediumnwb20230 96678676-89dd-\n", + "5e756e76-68be- amplitude 552176ab-d870- 0 fa4faf43-e747- clusterless_th default_cluste mediumnwb20230 07036486-e9f5-\n", + "67f156e1-5da7- amplitude 8f45b210-c8f9- 0 30895f0f-1eec- clusterless_th default_cluste mediumnwb20230 22fb2b64-fc3c-\n", " ...\n", - " (Total: 23)" + " (Total: 26)" ] }, - "execution_count": 2, + "execution_count": 27, "metadata": {}, "output_type": "execute_result" } @@ -279,7 
+270,10 @@ "source": [ "from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput\n", "import spyglass.spikesorting.v1 as sgs\n", - "from spyglass.decoding.v1.waveform_features import UnitWaveformFeaturesSelection\n", + "from spyglass.decoding.v1.waveform_features import (\n", + " UnitWaveformFeaturesSelection,\n", + " UnitWaveformFeatures,\n", + ")\n", "\n", "\n", "nwb_copy_file_name = \"mediumnwb20230802_.nwb\"\n", @@ -292,15 +286,18 @@ "\n", "feature_key = {\"features_param_name\": \"amplitude\"}\n", "\n", - "(sgs.SpikeSortingSelection & sorter_keys) * SpikeSortingOutput.CurationV1 * (\n", + "(\n", " UnitWaveformFeaturesSelection.proj(merge_id=\"spikesorting_merge_id\")\n", - " & feature_key\n", + " * SpikeSortingOutput.CurationV1\n", + " * sgs.SpikeSortingSelection\n", + ") & SpikeSortingOutput().get_restricted_merge_ids(\n", + " sorter_keys, sources=[\"v1\"], as_dict=True\n", ")" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 24, "metadata": {}, "outputs": [ { @@ -368,44 +365,44 @@ "

features_param_name

\n", " a name for this set of parameters\n", " \n", - " 0751a1e1-a406-7f87-ae6f-ce4ffc60621c\n", - "amplitude485a4ddf-332d-35b5-3ad4-0561736c1844\n", - "amplitude4a712103-c223-864f-82e0-6c23de79cc14\n", - "amplitude4a72c253-b3ca-8c13-e615-736a7ebff35c\n", - "amplitude5c53bd33-d57c-fbba-e0fb-55e0bcb85d03\n", - "amplitude614d796c-0b95-6364-aaa0-b6cb1e7bbb83\n", - "amplitude6acb99b8-6a0c-eb83-1141-5f603c5895e0\n", - "amplitude6d039a63-17ad-0b78-4b1e-f02d5f3dbbc5\n", - "amplitude74e10781-1228-4075-0870-af224024ffdc\n", - "amplitude7e3fa66e-727e-1541-819a-b01309bb30ae\n", - "amplitude86897349-ff68-ac72-02eb-739dd88936e6\n", - "amplitude8bbddc0f-d6ae-6260-9400-f884a6e25ae8\n", + " 0233e49a-b849-7eab-7434-9c298eea87b8\n", + "amplitude07239cea-7578-5409-692c-18c9d26b4d36\n", + "amplitude08be9775-370d-6492-0b4e-a5db4ce7a128\n", + "amplitude11819f33-11d5-f0f8-2590-ce3d60b76f3a\n", + "amplitude1c2ea289-2e7f-dcda-0464-ce97d3d6a392\n", + "amplitude20f24092-d191-0c58-55c8-d43d453f9fd4\n", + "amplitude2598b48e-49a0-3389-dd15-0230e8d326e4\n", + "amplitude483055a5-9775-27b7-856e-01543bd920aa\n", + "amplitude50ae3f7e-65a8-5fc2-5304-ab534b90fa46\n", + "amplitude50b29d01-2d74-e37e-2842-ad56d833c5f9\n", + "amplitude5e756e76-68be-21b7-7764-cb78d9aa4ef8\n", + "amplitude67f156e1-5da7-9c89-03b1-cc2dba88dacd\n", "amplitude \n", " \n", "

...

\n", - "

Total: 23

\n", + "

Total: 26

\n", " " ], "text/plain": [ "*spikesorting_ *features_para\n", "+------------+ +------------+\n", - "0751a1e1-a406- amplitude \n", - "485a4ddf-332d- amplitude \n", - "4a712103-c223- amplitude \n", - "4a72c253-b3ca- amplitude \n", - "5c53bd33-d57c- amplitude \n", - "614d796c-0b95- amplitude \n", - "6acb99b8-6a0c- amplitude \n", - "6d039a63-17ad- amplitude \n", - "74e10781-1228- amplitude \n", - "7e3fa66e-727e- amplitude \n", - "86897349-ff68- amplitude \n", - "8bbddc0f-d6ae- amplitude \n", + "0233e49a-b849- amplitude \n", + "07239cea-7578- amplitude \n", + "08be9775-370d- amplitude \n", + "11819f33-11d5- amplitude \n", + "1c2ea289-2e7f- amplitude \n", + "20f24092-d191- amplitude \n", + "2598b48e-49a0- amplitude \n", + "483055a5-9775- amplitude \n", + "50ae3f7e-65a8- amplitude \n", + "50b29d01-2d74- amplitude \n", + "5e756e76-68be- amplitude \n", + "67f156e1-5da7- amplitude \n", " ...\n", - " (Total: 23)" + " (Total: 26)" ] }, - "execution_count": 3, + "execution_count": 24, "metadata": {}, "output_type": "execute_result" } @@ -413,19 +410,19 @@ "source": [ "from spyglass.decoding.v1.waveform_features import UnitWaveformFeaturesSelection\n", "\n", - "spikesorting_merge_id = (\n", - " (sgs.SpikeSortingSelection & sorter_keys)\n", - " * SpikeSortingOutput.CurationV1\n", - " * (\n", - " UnitWaveformFeaturesSelection.proj(merge_id=\"spikesorting_merge_id\")\n", - " & feature_key\n", - " )\n", - ").fetch(\"merge_id\")\n", + "# find the merge ids that correspond to the sorter key restrictions\n", + "merge_ids = SpikeSortingOutput().get_restricted_merge_ids(\n", + " sorter_keys, sources=[\"v1\"], as_dict=True\n", + ")\n", "\n", - "waveform_selection_keys = [\n", - " {\"spikesorting_merge_id\": merge_id, \"features_param_name\": \"amplitude\"}\n", - " for merge_id in spikesorting_merge_id\n", - "]\n", + "# find the previously populated waveform selection keys that correspond to these sorts\n", + "waveform_selection_keys = (\n", + " UnitWaveformFeaturesSelection().proj(merge_id=\"spikesorting_merge_id\")\n", + " & merge_ids\n", + " & feature_key\n", + ").fetch(as_dict=True)\n", + "for key in waveform_selection_keys:\n", + " key[\"spikesorting_merge_id\"] = key.pop(\"merge_id\")\n", "\n", "UnitWaveformFeaturesSelection & waveform_selection_keys" ] @@ -2959,7 +2956,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.18" + "version": "3.9.16" } }, "nbformat": 4, diff --git a/notebooks/42_Decoding_SortedSpikes.ipynb b/notebooks/42_Decoding_SortedSpikes.ipynb index 23e1888fe..66c3de7f0 100644 --- a/notebooks/42_Decoding_SortedSpikes.ipynb +++ b/notebooks/42_Decoding_SortedSpikes.ipynb @@ -21,7 +21,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -40,7 +40,7 @@ "## SortedSpikesGroup\n", "\n", "`SortedSpikesGroup` is a child table of `SpikeSortingOutput` in the spikesorting pipeline. It allows us to group the spikesorting results from multiple \n", - "sources (e.g. multiple terode groups or intervals) into a single entry. Here we will group together the spiking of multiple tetrode groups to use for decoding.\n", + "sources (e.g. multiple tetrode groups or intervals) into a single entry. 
Here we will group together the spiking of multiple tetrode groups to use for decoding.\n", "\n", "\n", "This table allows us filter units by their annotation labels from curation (e.g only include units labeled \"good\", exclude units labeled \"noise\") by defining parameters from `UnitSelectionParams`. When accessing data through `SortedSpikesGroup` the table will include only units with at least one label in `include_labels` and no labels in `exclude_labels`. We can look at those here:\n" @@ -48,18 +48,9 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 7, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[2024-02-02 12:06:04,725][INFO]: Connecting sambray@lmf-db.cin.ucsf.edu:3306\n", - "[2024-02-02 12:06:04,762][INFO]: Connected sambray@lmf-db.cin.ucsf.edu:3306\n", - "[12:06:05][WARNING] Spyglass: Please update position_tools to >= 0.1.0\n" - ] - }, { "name": "stdout", "output_type": "stream", @@ -141,10 +132,12 @@ "=BLOB=\n", "=BLOB=exclude_noise\n", "=BLOB=\n", + "=BLOB=MS2220180629\n", + "=BLOB=\n", "=BLOB= \n", " \n", " \n", - "

Total: 3

\n", + "

Total: 4

\n", " " ], "text/plain": [ @@ -153,10 +146,11 @@ "all_units =BLOB= =BLOB= \n", "default_exclus =BLOB= =BLOB= \n", "exclude_noise =BLOB= =BLOB= \n", - " (Total: 3)" + "MS2220180629 =BLOB= =BLOB= \n", + " (Total: 4)" ] }, - "execution_count": 1, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } @@ -187,7 +181,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -309,7 +303,7 @@ " (Total: 3)" ] }, - "execution_count": 2, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -326,111 +320,21 @@ " \"curation_id\": 1,\n", "}\n", "# check the set of sorting we'll use\n", - "(sgs.SpikeSortingSelection & sorter_keys) * SpikeSortingOutput.CurationV1" + "(\n", + " sgs.SpikeSortingSelection & sorter_keys\n", + ") * SpikeSortingOutput.CurationV1 & sorter_keys" ] }, { - "cell_type": "code", - "execution_count": 6, + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " \n", - " \n", - " \n", - "\n", - "\n", - "
\n", - "

nwb_file_name

\n", - " name of the NWB file\n", - "
\n", - "

unit_filter_params_name

\n", - " \n", - "
\n", - "

sorted_spikes_group_name

\n", - " \n", - "
mediumnwb20230802_.nwball_unitstest_group
\n", - " \n", - "

Total: 1

\n", - " " - ], - "text/plain": [ - "*nwb_file_name *unit_filter_p *sorted_spikes\n", - "+------------+ +------------+ +------------+\n", - "mediumnwb20230 all_units test_group \n", - " (Total: 1)" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], "source": [ - "from spyglass.decoding.v1.sorted_spikes import SortedSpikesGroup\n", - "\n", - "SortedSpikesGroup()" + "Finding the merge id's corresponding to an interpretable restriction such as `merge_id` or `interval_list` can require several join steps with upstream tables. To simplify this process we can use the included helper function `SpikeSortingOutput().get_restricted_merge_ids()` to perform the necessary joins and return the matching merge id's" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -502,33 +406,30 @@ " \n", " \n", " mediumnwb20230802_.nwb\n", - "all_units\n", - "test_groupmediumnwb20230802_.nwb\n", "default_exclusion\n", "test_group \n", " \n", " \n", - "

Total: 2

\n", + "

Total: 1

\n", " " ], "text/plain": [ "*nwb_file_name *unit_filter_p *sorted_spikes\n", "+------------+ +------------+ +------------+\n", - "mediumnwb20230 all_units test_group \n", "mediumnwb20230 default_exclus test_group \n", - " (Total: 2)" + " (Total: 1)" ] }, - "execution_count": 7, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# get the merge_ids for the selected sorting\n", - "spikesorting_merge_ids = (\n", - " (sgs.SpikeSortingSelection & sorter_keys) * SpikeSortingOutput.CurationV1\n", - ").fetch(\"merge_id\")\n", + "spikesorting_merge_ids = SpikeSortingOutput().get_restricted_merge_ids(\n", + " sorter_keys, restrict_by_artifact=False\n", + ")\n", "\n", "# create a new sorted spikes group\n", "unit_filter_params_name = \"default_exclusion\"\n", @@ -1536,7 +1437,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.18" + "version": "3.9.16" } }, "nbformat": 4, diff --git a/notebooks/py_scripts/10_Spike_SortingV1.py b/notebooks/py_scripts/10_Spike_SortingV1.py index 96ee444ff..d74c2b4d2 100644 --- a/notebooks/py_scripts/10_Spike_SortingV1.py +++ b/notebooks/py_scripts/10_Spike_SortingV1.py @@ -199,7 +199,7 @@ sgs.MetricCurationSelection.insert_selection(key) sgs.MetricCurationSelection() & key -sgs.MetricCuration.populate() +sgs.MetricCuration.populate(key) sgs.MetricCuration() & key # to do another round of curation, fetch the relevant info and insert back into CurationV1 using `insert_curation` @@ -227,7 +227,7 @@ sgs.CurationV1() -# ## Manual Curation +# ## Manual Curation (Optional) # Next we will do manual curation. this is done with figurl. to incorporate info from other stages of processing (e.g. metrics) we have to store that with kachery cloud and get curation uri referring to it. it can be done with `generate_curation_uri`. # @@ -297,8 +297,9 @@ sgs.CurationV1() -# We now insert the curated spike sorting to a `Merge` table for feeding into downstream processing pipelines. +# ## Downstream usage (Merge table) # +# Regardless of Curation method used, to make use of spikeorting results in downstream pipelines like Decoding, we will need to insert it into the `SpikeSortingOutput` merge table. # + from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput @@ -306,7 +307,27 @@ SpikeSortingOutput() # - -SpikeSortingOutput.insert([key], part_name="CurationV1") +# insert the automatic curation spikesorting results +curation_key = sss_pk.fetch1("KEY") +curation_key["curation_id"] = 1 +merge_insert_key = (sgs.CurationV1 & curation_key).fetch("KEY", as_dict=True) +SpikeSortingOutput.insert(merge_insert_key, part_name="CurationV1") SpikeSortingOutput.merge_view() -SpikeSortingOutput.CurationV1() +# Finding the merge id's corresponding to an interpretable restriction such as `merge_id` or `interval_list` can require several join steps with upstream tables. 
To simplify this process we can use the included helper function `SpikeSortingOutput().get_restricted_merge_ids()` to perform the necessary joins and return the matching merge id's + +selection_key = { + "nwb_file_name": nwb_file_name2, + "sorter": "mountainsort4", + "interval_list_name": "01_s1", + "curation_id": 0, +} # this function can use restrictions from throughout the spikesorting pipeline +spikesorting_merge_ids = SpikeSortingOutput().get_restricted_merge_ids( + selection_key, as_dict=True +) +spikesorting_merge_ids + +# With the spikesorting merge_ids we want we can also use the method `get_sort_group_info` to get a table linking the merge id to the electrode group it is sourced from. This can be helpful for restricting to just electrodes from a brain area of interest + +merge_keys = [{"merge_id": str(id)} for id in spikesorting_merge_ids] +SpikeSortingOutput().get_sort_group_info(merge_keys) diff --git a/notebooks/py_scripts/40_Extracting_Clusterless_Waveform_Features.py b/notebooks/py_scripts/40_Extracting_Clusterless_Waveform_Features.py new file mode 100644 index 000000000..ad17a7c6f --- /dev/null +++ b/notebooks/py_scripts/40_Extracting_Clusterless_Waveform_Features.py @@ -0,0 +1,348 @@ +# --- +# jupyter: +# jupytext: +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.15.2 +# kernelspec: +# display_name: spyglass +# language: python +# name: python3 +# --- + +# _Developer Note:_ if you may make a PR in the future, be sure to copy this +# notebook, and use the `gitignore` prefix `temp` to avoid future conflicts. +# +# This is one notebook in a multi-part series on clusterless decoding in Spyglass +# +# - To set up your Spyglass environment and database, see +# [the Setup notebook](./00_Setup.ipynb) +# - For additional info on DataJoint syntax, including table definitions and +# inserts, see +# [the Insert Data notebook](./01_Insert_Data.ipynb) +# - Prior to running, please familiarize yourself with the [spike sorting +# pipeline](./02_Spike_Sorting.ipynb) and generate input position data with +# either the [Trodes](./20_Position_Trodes.ipynb) or DLC notebooks +# ([1](./21_Position_DLC_1.ipynb), [2](./22_Position_DLC_2.ipynb), +# [3](./23_Position_DLC_3.ipynb)). +# +# The goal of this notebook is to populate the `UnitWaveformFeatures` table, which depends `SpikeSortingOutput`. This table contains the features of the waveforms of each unit. +# +# While clusterless decoding avoids actual spike sorting, we need to pass through these tables to maintain (relative) pipeline simplicity. Pass-through tables keep spike sorting and clusterless waveform extraction as similar as possible, by using shared steps. Here, "spike sorting" involves simple thresholding (sorter: clusterless_thresholder). +# + +# + +from pathlib import Path +import datajoint as dj + +dj.config.load( + Path("../dj_local_conf.json").absolute() +) # load config for database connection info +# - + +# First, if you haven't inserted the the `mediumnwb20230802.wnb` file into the database, you should do so now. This is the file that we will use for the decoding tutorials. +# +# It is a truncated version of the full NWB file, so it will run faster, but bigger than the minirec file we used in the previous tutorials so that decoding makes sense. 
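#
# If you are not sure whether this file is already in your database, a quick
# check of the `Session` table (from `spyglass.common`) avoids a duplicate
# insert. This is a minimal sketch; it only assumes the same copy-file naming
# convention used in the next cell:

# +
from spyglass.common import Session
from spyglass.utils.nwb_helper_fn import get_nwb_copy_filename

nwb_copy_file_name = get_nwb_copy_filename("mediumnwb20230802.nwb")
if len(Session & {"nwb_file_name": nwb_copy_file_name}) == 0:
    print("Session not found -- run the insertion cell below.")
else:
    print("Session already inserted.")
# -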
+# + +# + +from spyglass.utils.nwb_helper_fn import get_nwb_copy_filename +import spyglass.data_import as sgi +import spyglass.position as sgp + +# Insert the nwb file +nwb_file_name = "mediumnwb20230802.nwb" +nwb_copy_file_name = get_nwb_copy_filename(nwb_file_name) +sgi.insert_sessions(nwb_file_name) + +# Position +sgp.v1.TrodesPosParams.insert_default() + +interval_list_name = "pos 0 valid times" + +trodes_s_key = { + "nwb_file_name": nwb_copy_file_name, + "interval_list_name": interval_list_name, + "trodes_pos_params_name": "default", +} +sgp.v1.TrodesPosSelection.insert1( + trodes_s_key, + skip_duplicates=True, +) +sgp.v1.TrodesPosV1.populate(trodes_s_key) +# - + +# These next steps are the same as in the [Spike Sorting notebook](./10_Spike_SortingV1.ipynb), but we'll repeat them here for clarity. These are pre-processing steps that are shared between spike sorting and clusterless decoding. +# +# We first set the `SortGroup` to define which contacts are sorted together. +# +# We then setup for spike sorting by bandpass filtering and whitening the data via the `SpikeSortingRecording` table. +# + +# + +import spyglass.spikesorting.v1 as sgs + +sgs.SortGroup.set_group_by_shank(nwb_file_name=nwb_copy_file_name) + +sort_group_ids = (sgs.SortGroup & {"nwb_file_name": nwb_copy_file_name}).fetch( + "sort_group_id" +) + +group_keys = [] +for sort_group_id in sort_group_ids: + key = { + "nwb_file_name": nwb_copy_file_name, + "sort_group_id": sort_group_id, + "interval_list_name": interval_list_name, + "preproc_param_name": "default", + "team_name": "Alison Comrie", + } + group_keys.append(key) + sgs.SpikeSortingRecordingSelection.insert_selection(key) + +sgs.SpikeSortingRecording.populate(group_keys) +# - + +# Next we do artifact detection. Here we skip it by setting the `artifact_param_name` to `None`, but in practice you should detect artifacts as it will affect the decoding. +# + +# + +recording_ids = ( + sgs.SpikeSortingRecordingSelection & {"nwb_file_name": nwb_copy_file_name} +).fetch("recording_id") + +group_keys = [] +for recording_id in recording_ids: + key = { + "recording_id": recording_id, + "artifact_param_name": "none", + } + group_keys.append(key) + sgs.ArtifactDetectionSelection.insert_selection(key) + +sgs.ArtifactDetection.populate(group_keys) +# - + +# Now we run the "spike sorting", which in our case is simply thresholding the signal to find spikes. We use the `SpikeSorting` table to store the results. Note that `sorter_param_name` defines the parameters for thresholding the signal. +# + +(sgs.SpikeSorterParameters() & {"sorter": "clusterless_thresholder"}).fetch1() + +group_keys = [] +for recording_id in recording_ids: + key = { + "recording_id": recording_id, + "sorter": "clusterless_thresholder", + "sorter_param_name": "default_clusterless", + "nwb_file_name": nwb_copy_file_name, + "interval_list_name": str( + ( + sgs.ArtifactDetectionSelection + & {"recording_id": recording_id, "artifact_param_name": "none"} + ).fetch1("artifact_id") + ), + } + group_keys.append(key) + sgs.SpikeSortingSelection.insert_selection(key) +sort_keys = (sgs.SpikeSortingSelection & group_keys).fetch("KEY") +sgs.SpikeSorting.populate(sort_keys) + +# For clusterless decoding we do not need any manual curation, but for the sake of the pipeline, we need to store the output of the thresholding in the `CurationV1` table and insert this into the `SpikeSortingOutput` table. 
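#
# After the next few cells have run, one quick sanity check is to restrict the
# merge part table by this session. This is a minimal sketch that reuses the
# `SpikeSortingSelection` join shown in the decoding notebooks and assumes the
# `sgs` import and `nwb_copy_file_name` defined earlier in this notebook:

# +
from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput

(
    sgs.SpikeSortingSelection & {"nwb_file_name": nwb_copy_file_name}
) * SpikeSortingOutput.CurationV1
# -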
+# + +sgs.SpikeSorting().populate( + sgs.SpikeSortingSelection + & { + "nwb_file_name": nwb_copy_file_name, + "sorter": "clusterless_thresholder", + "sorter_param_name": "default_clusterless", + } +) + +# + +from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput + +sorting_ids = ( + sgs.SpikeSortingSelection + & { + "nwb_file_name": nwb_copy_file_name, + "sorter": "clusterless_thresholder", + "sorter_param_name": "default_clusterless", + } +).fetch("sorting_id") + +for sorting_id in sorting_ids: + try: + sgs.CurationV1.insert_curation(sorting_id=sorting_id) + except KeyError as e: + pass + +SpikeSortingOutput.insert( + sgs.CurationV1().fetch("KEY"), + part_name="CurationV1", + skip_duplicates=True, +) +# - + +# Finally, we extract the waveform features of each SortGroup. This is done by the `UnitWaveformFeatures` table. +# +# To set this up, we use the `WaveformFeaturesParams` to define the time around the spike that we want to use for feature extraction, and which features to extract. Here is an example of the parameters used for extraction the amplitude of the negative peak of the waveform: +# +# ```python +# +# waveform_extraction_params = { +# "ms_before": 0.5, +# "ms_after": 0.5, +# "max_spikes_per_unit": None, +# "n_jobs": 5, +# "total_memory": "5G", +# } +# waveform_feature_params = { +# "amplitude": { +# "peak_sign": "neg", +# "estimate_peak_time": False, +# } +# } +# ``` +# +# We see that we want 0.5 ms of time before and after the peak of the negative spike. We also see that we want to extract the amplitude of the negative peak, and that we do not want to estimate the peak time (since we know it is at 0 ms). +# +# You can define other features to extract such as spatial location of the spike: +# +# ```python +# waveform_extraction_params = { +# "ms_before": 0.5, +# "ms_after": 0.5, +# "max_spikes_per_unit": None, +# "n_jobs": 5, +# "total_memory": "5G", +# } +# waveform_feature_params = { +# "amplitude": { +# "peak_sign": "neg", +# "estimate_peak_time": False, +# }, +# "spike location": {} +# } +# +# ``` +# +# _Note_: Members of the Frank Lab can use "ampl_10_jobs_v2" instead of "amplitude" +# for significant speed improvements. +# + +# + +from spyglass.decoding.v1.waveform_features import WaveformFeaturesParams + +waveform_extraction_params = { + "ms_before": 0.5, + "ms_after": 0.5, + "max_spikes_per_unit": None, + "n_jobs": 5, + "total_memory": "5G", +} +waveform_feature_params = { + "amplitude": { + "peak_sign": "neg", + "estimate_peak_time": False, + } +} + +WaveformFeaturesParams.insert1( + { + "features_param_name": "amplitude", + "params": { + "waveform_extraction_params": waveform_extraction_params, + "waveform_feature_params": waveform_feature_params, + }, + }, + skip_duplicates=True, +) + +WaveformFeaturesParams() +# - + +# Now that we've inserted the waveform features parameters, we need to define which parameters to use for each SortGroup. This is done by the `UnitWaveformFeaturesSelection` table. We need to link the primary key `merge_id` from the `SpikeSortingOutput` table to a features parameter set. +# + +# + +from spyglass.decoding.v1.waveform_features import UnitWaveformFeaturesSelection + +UnitWaveformFeaturesSelection() +# - + +# First we find the units we need. 
We can use the method `SpikeSortingOutput.get_restricted_merge_ids()` to perform the needed joins to find them: +# + +nwb_copy_file_name = "mediumnwb20230802_.nwb" +from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput + +key = { + "nwb_file_name": nwb_copy_file_name, + "sorter": "clusterless_thresholder", + "sorter_param_name": "default_clusterless", +} +merge_ids = SpikeSortingOutput().get_restricted_merge_ids( + { + "nwb_file_name": nwb_copy_file_name, + "sorter": "clusterless_thresholder", + "sorter_param_name": "default_clusterless", + }, + sources=["v1"], +) + +# Then we link them with the features parameters: +# + +# + +selection_keys = [ + { + "spikesorting_merge_id": merge_id, + "features_param_name": "amplitude", + } + for merge_id in merge_ids +] +UnitWaveformFeaturesSelection.insert(selection_keys, skip_duplicates=True) + +UnitWaveformFeaturesSelection & selection_keys +# - + +# Finally, we extract the waveform features, by populating the `UnitWaveformFeatures` table: +# + +# + +from spyglass.decoding.v1.waveform_features import UnitWaveformFeatures + +UnitWaveformFeatures.populate(selection_keys) +# - + +UnitWaveformFeatures & selection_keys + +# Now that we've extracted the data, we can inspect the results. Let's fetch the data: +# + +spike_times, spike_waveform_features = ( + UnitWaveformFeatures & selection_keys +).fetch_data() + +# Let's look at the features shape. This is a list corresponding to tetrodes, with each element being a numpy array of shape (n_spikes, n_features). The features in this case are the amplitude of each tetrode wire at the negative peak of the waveform. +# + +for features in spike_waveform_features: + print(features.shape) + +# We can plot the amplitudes to see if there is anything that looks neural and to look for outliers: +# + +# + +import matplotlib.pyplot as plt + +tetrode_ind = 1 +plt.scatter( + spike_waveform_features[tetrode_ind][:, 0], + spike_waveform_features[tetrode_ind][:, 1], + s=1, +) +# - diff --git a/notebooks/py_scripts/41_Decoding_Clusterless.py b/notebooks/py_scripts/41_Decoding_Clusterless.py new file mode 100644 index 000000000..6c286bf8d --- /dev/null +++ b/notebooks/py_scripts/41_Decoding_Clusterless.py @@ -0,0 +1,450 @@ +# --- +# jupyter: +# jupytext: +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.15.2 +# kernelspec: +# display_name: spyglass +# language: python +# name: python3 +# --- + +# # Clusterless Decoding +# +# ## Overview +# +# _Developer Note:_ if you may make a PR in the future, be sure to copy this +# notebook, and use the `gitignore` prefix `temp` to avoid future conflicts. +# +# This is one notebook in a multi-part series on Spyglass. +# +# - To set up your Spyglass environment and database, see +# [the Setup notebook](./00_Setup.ipynb) +# - This tutorial assumes you've already +# [extracted waveforms](./41_Extracting_Clusterless_Waveform_Features.ipynb), as well as loaded +# [position data](./20_Position_Trodes.ipynb). If 1D decoding, this data should also be +# [linearized](./24_Linearization.ipynb). +# +# Clusterless decoding can be performed on either 1D or 2D data. We will start with 2D data. +# +# ## Elements of Clusterless Decoding +# - **Position Data**: This is the data that we want to decode. It can be 1D or 2D. +# - **Spike Waveform Features**: These are the features that we will use to decode the position data. 
+# - **Decoding Model Parameters**: This is how we define the model that we will use to decode the position data. +# +# ## Grouping Data +# An important concept will be groups. Groups are tables that allow use to specify collections of data. We will use groups in two situations here: +# 1. Because we want to decode from more than one tetrode (or probe), so we will create a group that contains all of the tetrodes that we want to decode from. +# 2. Similarly, we will create a group for the position data that we want to decode, so that we can decode from position data from multiple sessions. +# +# ### Grouping Waveform Features +# Let's start with grouping the Waveform Features. We will first inspect the waveform features that we have extracted to figure out the primary keys of the data that we want to decode from. We need to use the tables `SpikeSortingSelection` and `SpikeSortingOutput` to figure out the `merge_id` associated with `nwb_file_name` to get the waveform features associated with the NWB file of interest. +# + +# + +from pathlib import Path +import datajoint as dj + +dj.config.load( + Path("../dj_local_conf.json").absolute() +) # load config for database connection info + +# + +from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput +import spyglass.spikesorting.v1 as sgs +from spyglass.decoding.v1.waveform_features import ( + UnitWaveformFeaturesSelection, + UnitWaveformFeatures, +) + + +nwb_copy_file_name = "mediumnwb20230802_.nwb" + +sorter_keys = { + "nwb_file_name": nwb_copy_file_name, + "sorter": "clusterless_thresholder", + "sorter_param_name": "default_clusterless", +} + +feature_key = {"features_param_name": "amplitude"} + +( + UnitWaveformFeaturesSelection.proj(merge_id="spikesorting_merge_id") + * SpikeSortingOutput.CurationV1 + * sgs.SpikeSortingSelection +) & SpikeSortingOutput().get_restricted_merge_ids( + sorter_keys, sources=["v1"], as_dict=True +) + +# + +from spyglass.decoding.v1.waveform_features import UnitWaveformFeaturesSelection + +# find the merge ids that correspond to the sorter key restrictions +merge_ids = SpikeSortingOutput().get_restricted_merge_ids( + sorter_keys, sources=["v1"], as_dict=True +) + +# find the previously populated waveform selection keys that correspond to these sorts +waveform_selection_keys = ( + UnitWaveformFeaturesSelection().proj(merge_id="spikesorting_merge_id") + & merge_ids + & feature_key +).fetch(as_dict=True) +for key in waveform_selection_keys: + key["spikesorting_merge_id"] = key.pop("merge_id") + +UnitWaveformFeaturesSelection & waveform_selection_keys +# - + +# We will create a group called `test_group` that contains all of the tetrodes that we want to decode from. We will use the `create_group` function to create this group. This function takes two arguments: the name of the group, and the keys of the tables that we want to include in the group. + +# + +from spyglass.decoding.v1.clusterless import UnitWaveformFeaturesGroup + +UnitWaveformFeaturesGroup().create_group( + nwb_file_name=nwb_copy_file_name, + group_name="test_group", + keys=waveform_selection_keys, +) +UnitWaveformFeaturesGroup & {"waveform_features_group_name": "test_group"} +# - + +# We can see that we successfully associated "test_group" with the tetrodes that we want to decode from by using the `get_group` function. 
+ +UnitWaveformFeaturesGroup.UnitFeatures & { + "nwb_file_name": nwb_copy_file_name, + "waveform_features_group_name": "test_group", +} + +# ### Grouping Position Data +# +# We will now create a group called `02_r1` that contains all of the position data that we want to decode from. As before, we will use the `create_group` function to create this group. This function takes two arguments: the name of the group, and the keys of the tables that we want to include in the group. +# +# We use the the `PositionOutput` table to figure out the `merge_id` associated with `nwb_file_name` to get the position data associated with the NWB file of interest. In this case, we only have one position to insert, but we could insert multiple positions if we wanted to decode from multiple sessions. +# +# Note that the position data sampling frequency is what determines the time step of the decoding. In this case, the position data sampling frequency is 30 Hz, so the time step of the decoding will be 1/30 seconds. In practice, you will want to use a smaller time step such as 500 Hz. This will allow you to decode at a finer time scale. To do this, you will want to interpolate the position data to a higher sampling frequency as shown in the [position trodes notebook](./20_Position_Trodes.ipynb). +# +# You will also want to specify the name of the position variables if they are different from the default names. The default names are `position_x` and `position_y`. + +# + +from spyglass.position import PositionOutput +import spyglass.position as sgp + + +sgp.v1.TrodesPosParams.insert1( + { + "trodes_pos_params_name": "default_decoding", + "params": { + "max_LED_separation": 9.0, + "max_plausible_speed": 300.0, + "position_smoothing_duration": 0.125, + "speed_smoothing_std_dev": 0.100, + "orient_smoothing_std_dev": 0.001, + "led1_is_front": 1, + "is_upsampled": 1, + "upsampling_sampling_rate": 250, + "upsampling_interpolation_method": "linear", + }, + }, + skip_duplicates=True, +) + +trodes_s_key = { + "nwb_file_name": nwb_copy_file_name, + "interval_list_name": "pos 0 valid times", + "trodes_pos_params_name": "default_decoding", +} +sgp.v1.TrodesPosSelection.insert1( + trodes_s_key, + skip_duplicates=True, +) +sgp.v1.TrodesPosV1.populate(trodes_s_key) + +PositionOutput.TrodesPosV1 & trodes_s_key + +# + +from spyglass.decoding.v1.core import PositionGroup + +position_merge_ids = ( + PositionOutput.TrodesPosV1 + & { + "nwb_file_name": nwb_copy_file_name, + "interval_list_name": "pos 0 valid times", + "trodes_pos_params_name": "default_decoding", + } +).fetch("merge_id") + +PositionGroup().create_group( + nwb_file_name=nwb_copy_file_name, + group_name="test_group", + keys=[{"pos_merge_id": merge_id} for merge_id in position_merge_ids], +) + +PositionGroup & { + "nwb_file_name": nwb_copy_file_name, + "position_group_name": "test_group", +} +# - + +( + PositionGroup + & {"nwb_file_name": nwb_copy_file_name, "position_group_name": "test_group"} +).fetch1("position_variables") + +PositionGroup.Position & { + "nwb_file_name": nwb_copy_file_name, + "position_group_name": "test_group", +} + +# ## Decoding Model Parameters +# +# We will use the `non_local_detector` package to decode the data. This package is highly flexible and allows several different types of models to be used. In this case, we will use the `ContFragClusterlessClassifier` to decode the data. This has two discrete states: Continuous and Fragmented, which correspond to different types of movement models. 
To read more about this model, see: +# > Denovellis, E.L., Gillespie, A.K., Coulter, M.E., Sosa, M., Chung, J.E., Eden, U.T., and Frank, L.M. (2021). Hippocampal replay of experience at real-world speeds. eLife 10, e64505. [10.7554/eLife.64505](https://doi.org/10.7554/eLife.64505). +# +# Let's first look at the model and the default parameters: +# + +# + +from non_local_detector.models import ContFragClusterlessClassifier + +ContFragClusterlessClassifier() +# - + +# You can change these parameters like so: + +# + +from non_local_detector.models import ContFragClusterlessClassifier + +ContFragClusterlessClassifier( + clusterless_algorithm_params={ + "block_size": 10000, + "position_std": 12.0, + "waveform_std": 24.0, + }, +) +# - + +# This is how to insert the model parameters into the database: + +# + +from spyglass.decoding.v1.core import DecodingParameters + + +DecodingParameters.insert1( + { + "decoding_param_name": "contfrag_clusterless", + "decoding_params": ContFragClusterlessClassifier(), + "decoding_kwargs": dict(), + }, + skip_duplicates=True, +) + +DecodingParameters & {"decoding_param_name": "contfrag_clusterless"} +# - + +# We can retrieve these parameters and rebuild the model like so: + +# + +model_params = ( + DecodingParameters & {"decoding_param_name": "contfrag_clusterless"} +).fetch1() + +ContFragClusterlessClassifier(**model_params["decoding_params"]) +# - + +# ### 1D Decoding +# +# If you want to do 1D decoding, you will need to specify the `track_graph`, `edge_order`, and `edge_spacing` in the `environments` parameter. You can read more about these parameters in the [linearization notebook](./24_Linearization.ipynb). You can retrieve these parameters from the `TrackGraph` table if you have stored them there. These will then go into the `environments` parameter of the `ContFragClusterlessClassifier` model. + +# + +from non_local_detector.environment import Environment + +# ?Environment +# - + +# ## Decoding +# +# Now that we have grouped the data and defined the model parameters, we have finally set up the elements in tables that we need to decode the data. We now need to use the `ClusterlessDecodingSelection` to fully specify all the parameters and data that we want. +# +# This has: +# - `waveform_features_group_name`: the name of the group that contains the waveform features that we want to decode from +# - `position_group_name`: the name of the group that contains the position data that we want to decode from +# - `decoding_param_name`: the name of the decoding parameters that we want to use +# - `nwb_file_name`: the name of the NWB file that we want to decode from +# - `encoding_interval`: the interval of time that we want to train the initial model on +# - `decoding_interval`: the interval of time that we want to decode from +# - `estimate_decoding_params`: whether or not we want to estimate the decoding parameters +# +# +# The first three parameters should be familiar to you. +# +# +# ### Decoding and Encoding Intervals +# The `encoding_interval` is the interval of time that we want to train the initial model on. The `decoding_interval` is the interval of time that we want to decode from. These two intervals can be the same, but they do not have to be. For example, we may want to train the model on a long interval of time, but only decode from a short interval of time. This is useful if we want to decode from a short interval of time that is not representative of the entire session. 
In this case, we will train the model on a longer interval of time that is representative of the entire session. +# +# These keys come from the `IntervalList` table. We can see that the `IntervalList` table contains the `nwb_file_name` and `interval_name` that we need to specify the `encoding_interval` and `decoding_interval`. We will specify a short decoding interval called `test decoding interval` and use that to decode from. +# +# +# ### Estimating Decoding Parameters +# The last parameter is `estimate_decoding_params`. This is a boolean that specifies whether or not we want to estimate the decoding parameters. If this is `True`, then we will estimate the initial conditions and discrete transition matrix from the data. +# +# NOTE: If estimating parameters, then we need to treat times outside decoding interval as missing. this means that times outside the decoding interval will not use the spiking data and only the state transition matrix and previous time step will be used. This may or may not be desired depending on the length of this missing interval. +# + +# + +from spyglass.decoding.v1.clusterless import ClusterlessDecodingSelection + +ClusterlessDecodingSelection() + +# + +from spyglass.common import IntervalList + +IntervalList & {"nwb_file_name": nwb_copy_file_name} + +# + +decoding_interval_valid_times = [ + [1625935714.6359036, 1625935714.6359036 + 15.0] +] + +IntervalList.insert1( + { + "nwb_file_name": "mediumnwb20230802_.nwb", + "interval_list_name": "test decoding interval", + "valid_times": decoding_interval_valid_times, + }, + skip_duplicates=True, +) +# - + +# Once we have figured out the keys that we need, we can insert the `ClusterlessDecodingSelection` into the database. + +# + +selection_key = { + "waveform_features_group_name": "test_group", + "position_group_name": "test_group", + "decoding_param_name": "contfrag_clusterless", + "nwb_file_name": nwb_copy_file_name, + "encoding_interval": "pos 0 valid times", + "decoding_interval": "test decoding interval", + "estimate_decoding_params": False, +} + +ClusterlessDecodingSelection.insert1( + selection_key, + skip_duplicates=True, +) + +ClusterlessDecodingSelection & selection_key +# - + +ClusterlessDecodingSelection() + +# To run decoding, we simply populate the `ClusterlessDecodingOutput` table. This will run the decoding and insert the results into the database. We can then retrieve the results from the database. + +# + +from spyglass.decoding.v1.clusterless import ClusterlessDecodingV1 + +ClusterlessDecodingV1.populate(selection_key) +# - + +# We can now see it as an entry in the `DecodingOutput` table. + +# + +from spyglass.decoding.decoding_merge import DecodingOutput + +DecodingOutput.ClusterlessDecodingV1 & selection_key +# - + +# We can load the results of the decoding: + +decoding_results = (ClusterlessDecodingV1 & selection_key).fetch_results() +decoding_results + +# Finally, if we deleted the results, we can use the `cleanup` function to delete the results from the file system: + +DecodingOutput().cleanup() + +# ## Visualization of decoding output. +# +# The output of decoding can be challenging to visualize with static graphs, especially if the decoding is performed on 2D data. +# +# We can interactively visualize the output of decoding using the [figurl](https://github.com/flatironinstitute/figurl) package. This package allows to create a visualization of the decoding output that can be viewed in a web browser. 
This is useful for exploring the decoding output over time and sharing the results with others. +# +# **NOTE**: You will need a kachery cloud instance to use this feature. If you are a member of the Frank lab, you should have access to the Frank lab kachery cloud instance. If you are not a member of the Frank lab, you can create your own kachery cloud instance by following the instructions [here](https://github.com/flatironinstitute/kachery-cloud/blob/main/doc/create_kachery_zone.md). +# +# For each user, you will need to run `kachery-cloud-init` in the terminal and follow the instructions to associate your computer with your GitHub user on the kachery-cloud network. +# + +# + +# from non_local_detector.visualization import ( +# create_interactive_2D_decoding_figurl, +# ) + +# ( +# position_info, +# position_variable_names, +# ) = ClusterlessDecodingV1.fetch_position_info(selection_key) +# results_time = decoding_results.acausal_posterior.isel(intervals=0).time.values +# position_info = position_info.loc[results_time[0] : results_time[-1]] + +# env = ClusterlessDecodingV1.fetch_environments(selection_key)[0] +# spike_times, _ = ClusterlessDecodingV1.fetch_spike_data(selection_key) + + +# create_interactive_2D_decoding_figurl( +# position_time=position_info.index.to_numpy(), +# position=position_info[position_variable_names], +# env=env, +# results=decoding_results, +# posterior=decoding_results.acausal_posterior.isel(intervals=0) +# .unstack("state_bins") +# .sum("state"), +# spike_times=spike_times, +# head_dir=position_info["orientation"], +# speed=position_info["speed"], +# ) +# - + +# ## GPUs +# We can use GPUs for decoding which will result in a significant speedup. This is achieved using the [jax](https://jax.readthedocs.io/en/latest/) package. +# +# ### Ensuring jax can find a GPU +# Assuming you've set up a GPU, we can use `jax.devices()` to make sure the decoding code can see the GPU. If a GPU is available, it will be listed. +# +# In the following instance, we do not have a GPU: + +# + +import jax + +jax.devices() +# - + +# ### Selecting a GPU +# If you do have multiple GPUs, you can use the `jax` package to set the device (GPU) that you want to use. For example, if you want to use the second GPU, you can use the following code (uncomment first): + +# + +# device_id = 2 +# device = jax.devices()[device_id] +# jax.config.update("jax_default_device", device) +# device +# - + +# ### Monitoring GPU Usage +# +# You can see which GPUs are occupied (if you have multiple GPUs) by running the command `nvidia-smi` in +# a terminal (or `!nvidia-smi` in a notebook). Pick a GPU with low memory usage. +# +# We can monitor GPU use with the terminal command `watch -n 0.1 nvidia-smi`, will +# update `nvidia-smi` every 100 ms. This won't work in a notebook, as it won't +# display the updates. +# +# Other ways to monitor GPU usage are: +# +# - A +# [jupyter widget by nvidia](https://github.com/rapidsai/jupyterlab-nvdashboard) +# to monitor GPU usage in the notebook +# - A [terminal program](https://github.com/peci1/nvidia-htop) like nvidia-smi +# with more information about which GPUs are being utilized and by whom. 
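#
# Picking a device can also be scripted. Below is a minimal sketch (it assumes
# the `nvidia-smi` CLI is installed and that jax enumerates GPUs in the same
# order as `nvidia-smi`) that finds the GPU with the least memory currently in
# use; uncomment the last line to make it the jax default as shown above:

# +
import subprocess

gpu_query = subprocess.run(
    [
        "nvidia-smi",
        "--query-gpu=index,memory.used",
        "--format=csv,noheader,nounits",
    ],
    capture_output=True,
    text=True,
    check=True,
)
memory_used = [
    (int(index), int(used))
    for index, used in (
        line.split(", ") for line in gpu_query.stdout.splitlines()
    )
]
least_used_gpu = min(memory_used, key=lambda item: item[1])[0]
print(f"GPU {least_used_gpu} has the least memory in use")
# jax.config.update("jax_default_device", jax.devices()[least_used_gpu])
# -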
diff --git a/notebooks/py_scripts/42_Decoding_SortedSpikes.py b/notebooks/py_scripts/42_Decoding_SortedSpikes.py new file mode 100644 index 000000000..74576b553 --- /dev/null +++ b/notebooks/py_scripts/42_Decoding_SortedSpikes.py @@ -0,0 +1,182 @@ +# --- +# jupyter: +# jupytext: +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.15.2 +# kernelspec: +# display_name: spyglass +# language: python +# name: python3 +# --- + +# # Sorted Spikes Decoding +# +# The mechanics of decoding with sorted spikes are largely similar to those of decoding with unsorted spikes. You should familiarize yourself with the [clusterless decoding tutorial](./42_Decoding_Clusterless.ipynb) before proceeding with this one. +# +# The elements we will need to decode with sorted spikes are: +# - `PositionGroup` +# - `SortedSpikesGroup` +# - `DecodingParameters` +# - `encoding_interval` +# - `decoding_interval` +# +# This time, instead of extracting waveform features, we can proceed directly from the SpikeSortingOutput table to specify which units we want to decode. The rest of the decoding process is the same as before. +# +# + +# + +from pathlib import Path +import datajoint as dj + +dj.config.load( + Path("../dj_local_conf.json").absolute() +) # load config for database connection info +# - + +# ## SortedSpikesGroup +# +# `SortedSpikesGroup` is a child table of `SpikeSortingOutput` in the spikesorting pipeline. It allows us to group the spikesorting results from multiple +# sources (e.g. multiple tetrode groups or intervals) into a single entry. Here we will group together the spiking of multiple tetrode groups to use for decoding. +# +# +# This table allows us filter units by their annotation labels from curation (e.g only include units labeled "good", exclude units labeled "noise") by defining parameters from `UnitSelectionParams`. When accessing data through `SortedSpikesGroup` the table will include only units with at least one label in `include_labels` and no labels in `exclude_labels`. We can look at those here: +# + +# + +from spyglass.spikesorting.analysis.v1.group import UnitSelectionParams + +UnitSelectionParams().insert_default() + +# look at the filter set we'll use here +unit_filter_params_name = "default_exclusion" +print( + ( + UnitSelectionParams() + & {"unit_filter_params_name": unit_filter_params_name} + ).fetch1() +) +# look at full table +UnitSelectionParams() +# - + +# Now we can make our sorted spikes group with this unit selection parameter + +# + +from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput +import spyglass.spikesorting.v1 as sgs + +nwb_copy_file_name = "mediumnwb20230802_.nwb" + +sorter_keys = { + "nwb_file_name": nwb_copy_file_name, + "sorter": "mountainsort4", + "curation_id": 1, +} +# check the set of sorting we'll use +( + sgs.SpikeSortingSelection & sorter_keys +) * SpikeSortingOutput.CurationV1 & sorter_keys +# - + +# Finding the merge id's corresponding to an interpretable restriction such as `merge_id` or `interval_list` can require several join steps with upstream tables. 
To simplify this process we can use the included helper function `SpikeSortingOutput().get_restricted_merge_ids()` to perform the necessary joins and return the matching merge id's + +# + +# get the merge_ids for the selected sorting +spikesorting_merge_ids = SpikeSortingOutput().get_restricted_merge_ids( + sorter_keys, restrict_by_artifact=False +) + +# create a new sorted spikes group +unit_filter_params_name = "default_exclusion" +SortedSpikesGroup().create_group( + group_name="test_group", + nwb_file_name=nwb_copy_file_name, + keys=[ + {"spikesorting_merge_id": merge_id} + for merge_id in spikesorting_merge_ids + ], + unit_filter_params_name=unit_filter_params_name, +) +# check the new group +SortedSpikesGroup & { + "nwb_file_name": nwb_copy_file_name, + "sorted_spikes_group_name": "test_group", +} +# - + +# look at the sorting within the group we just made +SortedSpikesGroup.Units & { + "nwb_file_name": nwb_copy_file_name, + "sorted_spikes_group_name": "test_group", + "unit_filter_params_name": unit_filter_params_name, +} + +# ## Model parameters +# +# As before we can specify the model parameters. The only difference is that we will use the `ContFragSortedSpikesClassifier` instead of the `ContFragClusterlessClassifier`. + +# + +from spyglass.decoding.v1.core import DecodingParameters +from non_local_detector.models import ContFragSortedSpikesClassifier + + +DecodingParameters.insert1( + { + "decoding_param_name": "contfrag_sorted", + "decoding_params": ContFragSortedSpikesClassifier(), + "decoding_kwargs": dict(), + }, + skip_duplicates=True, +) + +DecodingParameters() +# - + +# ### 1D Decoding +# +# As in the clusterless notebook, we can decode 1D position if we specify the `track_graph`, `edge_order`, and `edge_spacing` parameters in the `Environment` class constructor. See the [clusterless decoding tutorial](./42_Decoding_Clusterless.ipynb) for more details. + +# ## Decoding +# +# Now we can decode the position using the sorted spikes using the `SortedSpikesDecodingSelection` table. Here we assume that `PositionGroup` has been specified as in the clusterless decoding tutorial. + +# + +selection_key = { + "sorted_spikes_group_name": "test_group", + "unit_filter_params_name": "default_exclusion", + "position_group_name": "test_group", + "decoding_param_name": "contfrag_sorted", + "nwb_file_name": "mediumnwb20230802_.nwb", + "encoding_interval": "pos 0 valid times", + "decoding_interval": "test decoding interval", + "estimate_decoding_params": False, +} + +from spyglass.decoding import SortedSpikesDecodingSelection + +SortedSpikesDecodingSelection.insert1( + selection_key, + skip_duplicates=True, +) + +# + +from spyglass.decoding.v1.sorted_spikes import SortedSpikesDecodingV1 + +SortedSpikesDecodingV1.populate(selection_key) +# - + +# We verify that the results have been inserted into the `DecodingOutput` merge table. 
+ +# + +from spyglass.decoding.decoding_merge import DecodingOutput + +DecodingOutput.SortedSpikesDecodingV1 & selection_key +# - + +# We can load the results as before: + +results = (SortedSpikesDecodingV1 & selection_key).fetch_results() +results diff --git a/src/spyglass/spikesorting/spikesorting_merge.py b/src/spyglass/spikesorting/spikesorting_merge.py index ee41e1e04..fda83cdd7 100644 --- a/src/spyglass/spikesorting/spikesorting_merge.py +++ b/src/spyglass/spikesorting/spikesorting_merge.py @@ -7,9 +7,16 @@ from spyglass.spikesorting.v0.spikesorting_curation import ( # noqa: F401 CuratedSpikeSorting, ) -from spyglass.spikesorting.v1.curation import CurationV1 # noqa: F401 +from spyglass.spikesorting.v1 import ( # noqa: F401 + ArtifactDetectionSelection, + CurationV1, + MetricCurationSelection, + SpikeSortingRecordingSelection, + SpikeSortingSelection, +) from spyglass.utils.dj_merge_tables import _Merge from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils.logging import logger schema = dj.schema("spikesorting_merge") @@ -50,6 +57,93 @@ class CuratedSpikeSorting(SpyglassMixin, dj.Part): # noqa: F811 -> CuratedSpikeSorting """ + def get_restricted_merge_ids( + self, + key: dict, + sources: list = ["v0", "v1"], + restrict_by_artifact: bool = True, + as_dict: bool = False, + ): + """Helper function to get merge ids for a given interpretable key + + Parameters + ---------- + key : dict + restriction for any stage of the spikesorting pipeline + sources : list, optional + list of sources to restrict to + restrict_by_artifact : bool, optional + whether to restrict by artifact rather than original interval name. Relevant to v1 pipeline, by default True + as_dict : bool, optional + whether to return merge_ids as a list of dictionaries, by default False + + Returns + ------- + merge_ids : list + list of merge ids from the restricted sources + """ + merge_ids = [] + + if "v1" in sources: + key_v1 = key.copy() + # Recording restriction + table = SpikeSortingRecordingSelection() & key_v1 + if restrict_by_artifact: + # Artifact restriction + table_artifact = ArtifactDetectionSelection * table & key_v1 + artifact_restrict = table_artifact.proj( + interval_list_name="artifact_id" + ).fetch(as_dict=True) + # convert interval_list_name from artifact uuid to string + for key_i in artifact_restrict: + key_i["interval_list_name"] = str( + key_i["interval_list_name"] + ) + if "interval_list_name" in key_v1: + key_v1.pop( + "interval_list_name" + ) # pop the interval list since artifact intervals are now the restriction + # Spike sorting restriction + table = ( + (SpikeSortingSelection() * table.proj()) + & artifact_restrict + & key_v1 + ) + else: + # use the supplied interval to restrict + table = (SpikeSortingSelection() * table.proj()) & key_v1 + # Metric Curation restriction + headings = MetricCurationSelection.heading.names + headings.pop( + headings.index("curation_id") + ) # this is the parent curation id of the final entry. dont restrict by this name here + # metric curation is an optional process. only do this join if the headings are present in the key + if any([heading in key_v1 for heading in headings]): + table = ( + MetricCurationSelection().proj(*headings) * table + ) & key_v1 + # get curations + table = (CurationV1() * table) & key_v1 + table = SpikeSortingOutput().CurationV1() & table + merge_ids.extend(table.fetch("merge_id", as_dict=as_dict)) + + if "v0" in sources: + if restrict_by_artifact: + logger.warning( + 'V0 requires artifact restrict. 
Ignoring "restrict_by_artifact" flag.' + ) + key_v0 = key.copy() + if "sort_interval" not in key_v0 and "interval_list_name" in key_v0: + key_v0["sort_interval"] = key_v0["interval_list_name"] + _ = key_v0.pop("interval_list_name") + merge_ids.extend( + (SpikeSortingOutput.CuratedSpikeSorting() & key_v0).fetch( + "merge_id", as_dict=as_dict + ) + ) + + return merge_ids + @classmethod def get_recording(cls, key): """get the recording associated with a spike sorting output""" @@ -68,6 +162,27 @@ def get_sorting(cls, key): query = source_table & cls.merge_get_part(key) return query.get_sorting(query.fetch("KEY")) + @classmethod + def get_sort_group_info(cls, key): + """get the sort group info associated with a spike sorting output + (e.g. electrode location, brain region, etc.) + Parameters: + ----------- + key : dict + dictionary specifying the restriction (note: multi-source not currently supported) + Returns: + ------- + sort_group_info : Table + Table linking a merge id to information about the electrode group. + """ + source_table = source_class_dict[ + to_camel_case(cls.merge_get_parent(key).table_name) + ] + part_table = cls.merge_get_part(key) + query = source_table & part_table + sort_group_info = source_table.get_sort_group_info(query.fetch("KEY")) + return part_table * sort_group_info # join the info with merge id's + def get_spike_times(self, key): spike_times = [] for nwb_file in self.fetch_nwb(key): diff --git a/src/spyglass/spikesorting/v0/spikesorting_curation.py b/src/spyglass/spikesorting/v0/spikesorting_curation.py index d960bb796..dab6ed740 100644 --- a/src/spyglass/spikesorting/v0/spikesorting_curation.py +++ b/src/spyglass/spikesorting/v0/spikesorting_curation.py @@ -6,11 +6,11 @@ import warnings from pathlib import Path from typing import List -from packaging import version import datajoint as dj import numpy as np import spikeinterface as si +from packaging import version if version.parse(si.__version__) < version.parse("0.99.1"): raise ImportError( @@ -20,6 +20,7 @@ import spikeinterface.preprocessing as sip import spikeinterface.qualitymetrics as sq +from spyglass.common import BrainRegion, Electrode from spyglass.common.common_interval import IntervalList from spyglass.common.common_nwbfile import AnalysisNwbfile from spyglass.settings import waveforms_dir @@ -32,6 +33,7 @@ ) from spyglass.utils import SpyglassMixin, logger +from .spikesorting_recording import SortGroup from .spikesorting_sorting import SpikeSorting schema = dj.schema("spikesorting_curation") @@ -1034,6 +1036,37 @@ def get_sorting(cls, key): sorting_key = (cls & key).fetch1("KEY") return Curation.get_curated_sorting(sorting_key) + @classmethod + def get_sort_group_info(cls, key): + """Returns the sort group information for the curation + (e.g. brain region, electrode placement, etc.) 
+ + Parameters + ---------- + key : dict + restriction on CuratedSpikeSorting table + + Returns + ------- + sort_group_info : Table + Table with information about the sort groups + """ + electrode_restrict_list = [] + for entry in cls & key: + # Just take one electrode entry per sort group + electrode_restrict_list.extend( + ((SortGroup.SortGroupElectrode() & entry) * Electrode).fetch( + limit=1 + ) + ) + # Run joins with the tables with info and return + sort_group_info = ( + (Electrode & electrode_restrict_list) + * (cls & key) + * SortGroup.SortGroupElectrode() + ) * BrainRegion() + return sort_group_info + @schema class UnitInclusionParameters(SpyglassMixin, dj.Manual): diff --git a/src/spyglass/spikesorting/v0/spikesorting_sorting.py b/src/spyglass/spikesorting/v0/spikesorting_sorting.py index 7e4743b93..9e4d84a61 100644 --- a/src/spyglass/spikesorting/v0/spikesorting_sorting.py +++ b/src/spyglass/spikesorting/v0/spikesorting_sorting.py @@ -204,6 +204,10 @@ def make(self, key: dict): sorter_params.pop("tempdir", None) sorter_params.pop("whiten", None) sorter_params.pop("outputs", None) + if "local_radius_um" in sorter_params: + sorter_params["radius_um"] = sorter_params.pop( + "local_radius_um" + ) # correct existing parameter sets for spikeinterface>=0.99.1 # Detect peaks for clusterless decoding detected_spikes = detect_peaks(recording, **sorter_params) diff --git a/src/spyglass/spikesorting/v1/curation.py b/src/spyglass/spikesorting/v1/curation.py index 5e862dd90..ac9c78f2b 100644 --- a/src/spyglass/spikesorting/v1/curation.py +++ b/src/spyglass/spikesorting/v1/curation.py @@ -7,9 +7,14 @@ import spikeinterface.curation as sc import spikeinterface.extractors as se +from spyglass.common import BrainRegion, Electrode from spyglass.common.common_ephys import Raw from spyglass.common.common_nwbfile import AnalysisNwbfile -from spyglass.spikesorting.v1.recording import SpikeSortingRecording +from spyglass.spikesorting.v1.recording import ( + SortGroup, + SpikeSortingRecording, + SpikeSortingRecordingSelection, +) from spyglass.spikesorting.v1.sorting import SpikeSorting, SpikeSortingSelection from spyglass.utils.dj_mixin import SpyglassMixin @@ -260,6 +265,42 @@ def get_merged_sorting(cls, key: dict) -> si.BaseSorting: else: return si_sorting + @classmethod + def get_sort_group_info(cls, key: dict) -> dj.Table: + """Returns the sort group information for the curation + (e.g. brain region, electrode placement, etc.) 
+ + Parameters + ---------- + key : dict + restriction on CuratedSpikeSorting table + + Returns + ------- + sort_group_info : Table + Table with information about the sort groups + """ + table = ( + (cls & key) * SpikeSortingSelection() + ) * SpikeSortingRecordingSelection().proj( + "recording_id", "sort_group_id" + ) + electrode_restrict_list = [] + for entry in table: + # pull just one electrode from each sort group for info + electrode_restrict_list.extend( + ((SortGroup.SortGroupElectrode() & entry) * Electrode).fetch( + limit=1 + ) + ) + + sort_group_info = ( + (Electrode & electrode_restrict_list) + * table + * SortGroup.SortGroupElectrode() + ) * BrainRegion() + return (cls & key).proj() * sort_group_info + def _write_sorting_to_nwb_with_curation( sorting_id: str, diff --git a/src/spyglass/spikesorting/v1/sorting.py b/src/spyglass/spikesorting/v1/sorting.py index b650c1c5f..9e3d19d6a 100644 --- a/src/spyglass/spikesorting/v1/sorting.py +++ b/src/spyglass/spikesorting/v1/sorting.py @@ -232,6 +232,10 @@ def make(self, key: dict): sorter_params.pop("tempdir", None) sorter_params.pop("whiten", None) sorter_params.pop("outputs", None) + if "local_radius_um" in sorter_params: + sorter_params["radius_um"] = sorter_params.pop( + "local_radius_um" + ) # correct existing parameter sets for spikeinterface>=0.99.1 # Detect peaks for clusterless decoding detected_spikes = detect_peaks(recording, **sorter_params) From 6763d79e5bf86f3e857aa70335421d4e6ef75a7b Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Sun, 21 Apr 2024 09:14:55 -0700 Subject: [PATCH 22/60] Add logging of AnalysisNwbfile creation time and file size (#937) * Add logging for any func that creates AnalysisNwbfile * Migrate create to top of respective funcs * Use pathlib for file size. Bump creation time to top of in spikesort * Clear pre_create_time on create * get/del -> pop --- CHANGELOG.md | 4 +- src/spyglass/common/common_ephys.py | 11 ++- src/spyglass/common/common_nwbfile.py | 86 +++++++++++++++---- src/spyglass/common/common_position.py | 6 +- src/spyglass/decoding/v0/clusterless.py | 9 +- src/spyglass/decoding/v1/waveform_features.py | 7 ++ src/spyglass/lfp/analysis/v1/lfp_band.py | 7 +- src/spyglass/lfp/v1/lfp.py | 4 +- src/spyglass/linearization/v0/main.py | 4 +- src/spyglass/linearization/v1/main.py | 4 +- .../position/v1/position_dlc_centroid.py | 7 +- .../position/v1/position_dlc_orient.py | 7 +- .../v1/position_dlc_pose_estimation.py | 7 +- .../position/v1/position_dlc_position.py | 7 +- .../position/v1/position_dlc_selection.py | 9 +- .../position/v1/position_trodes_position.py | 5 +- .../spikesorting/v0/spikesorting_curation.py | 29 ++++--- src/spyglass/spikesorting/v1/curation.py | 8 +- .../spikesorting/v1/metric_curation.py | 12 ++- src/spyglass/spikesorting/v1/recording.py | 6 ++ src/spyglass/spikesorting/v1/sorting.py | 6 +- src/spyglass/utils/dj_mixin.py | 8 +- 22 files changed, 187 insertions(+), 66 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e9ea13068..aad012b6b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,8 +17,10 @@ - Avoid permission check on personnel tables. #903 - Add documentation for `SpyglassMixin`. #903 - Add helper to identify merge table by definition. 
#903 -- Prioritize datajoint filepath entry for defining abs_path of analysis nwbfile #918 +- Prioritize datajoint filepath entry for defining abs_path of analysis nwbfile + #918 - Fix potential duplicate entries in Merge part tables #922 +- Add logging of AnalysisNwbfile creation time and size #937 ### Pipelines diff --git a/src/spyglass/common/common_ephys.py b/src/spyglass/common/common_ephys.py index 383d12d65..1880340a9 100644 --- a/src/spyglass/common/common_ephys.py +++ b/src/spyglass/common/common_ephys.py @@ -411,6 +411,7 @@ class LFP(SpyglassMixin, dj.Imported): def make(self, key): # get the NWB object with the data; FIX: change to fetch with # additional infrastructure + lfp_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) # logged rawdata = Raw().nwb_object(key) sampling_rate, interval_list_name = (Raw() & key).fetch1( @@ -465,8 +466,6 @@ def make(self, key): electrode_id_list = list(k["electrode_id"] for k in electrode_keys) electrode_id_list.sort() - lfp_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) - lfp_file_abspath = AnalysisNwbfile().get_abs_path(lfp_file_name) ( lfp_object_id, @@ -502,6 +501,7 @@ def make(self, key): }, replace=True, ) + AnalysisNwbfile().log(key, table=self.full_table_name) self.insert1(key) def nwb_object(self, key): @@ -666,6 +666,10 @@ class LFPBand(SpyglassMixin, dj.Computed): """ def make(self, key): + # create the analysis nwb file to store the results. + lfp_band_file_name = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) # get the NWB object with the lfp data; FIX: change to fetch with additional infrastructure lfp_object = ( LFP() & {"nwb_file_name": key["nwb_file_name"]} @@ -774,8 +778,6 @@ def make(self, key): ) return None - # create the analysis nwb file to store the results. - lfp_band_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) lfp_band_file_abspath = AnalysisNwbfile().get_abs_path( lfp_band_file_name ) @@ -853,6 +855,7 @@ def make(self, key): "previously saved lfp band times do not match current times" ) + AnalysisNwbfile().log(lfp_band_file_name, table=self.full_table_name) self.insert1(key) def fetch1_dataframe(self, *attrs, **kwargs): diff --git a/src/spyglass/common/common_nwbfile.py b/src/spyglass/common/common_nwbfile.py index c8f14af57..ba6b12668 100644 --- a/src/spyglass/common/common_nwbfile.py +++ b/src/spyglass/common/common_nwbfile.py @@ -3,12 +3,13 @@ import stat import string from pathlib import Path +from time import time import datajoint as dj +import h5py import numpy as np import pandas as pd import pynwb -import h5py import spikeinterface as si from hdmf.common import DynamicTable @@ -153,7 +154,7 @@ def cleanup(delete_files=False): class AnalysisNwbfile(SpyglassMixin, dj.Manual): definition = """ # Table for holding the NWB files that contain results of analysis, such as spike sorting. - analysis_file_name: varchar(64) # name of the file + analysis_file_name: varchar(64) # name of the file --- -> Nwbfile # name of the parent NWB file. Used for naming and metadata copy analysis_file_abs_path: filepath@analysis # the full path to the file @@ -162,11 +163,13 @@ class AnalysisNwbfile(SpyglassMixin, dj.Manual): # that span multiple NWB files INDEX (analysis_file_abs_path) """ - # NOTE the INDEX above is implicit from filepath@... above but needs to be explicit - # so that alter() can work + # NOTE the INDEX above is implicit from filepath@... + # above but needs to be explicit so that alter() can work # See #630, #664. Excessive key length. 
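+    # `_creation_times` (added below) maps each analysis_file_name to the time
+    # its creation started; log() uses it to compute the recorded time_delta.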
+ _creation_times = {} + def create(self, nwb_file_name): """Open the NWB file, create a copy, write the copy to disk and return the name of the new file. @@ -182,6 +185,9 @@ def create(self, nwb_file_name): analysis_file_name : str The name of the new NWB file. """ + # To allow some times to occur before create + creation_time = self._creation_times.pop("pre_create_time", time()) + nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) alter_source_script = False with pynwb.NWBHDF5IO( @@ -220,6 +226,8 @@ def create(self, nwb_file_name): permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH os.chmod(analysis_file_abs_path, permissions) + self._creation_times[analysis_file_name] = creation_time + return analysis_file_name @staticmethod @@ -300,13 +308,14 @@ def add(self, nwb_file_name, analysis_file_name): analysis_file_name : str The name of the analysis NWB file that was created. """ - key = dict() - key["nwb_file_name"] = nwb_file_name - key["analysis_file_name"] = analysis_file_name - key["analysis_file_description"] = "" - key["analysis_file_abs_path"] = AnalysisNwbfile.get_abs_path( - analysis_file_name - ) + key = { + "nwb_file_name": nwb_file_name, + "analysis_file_name": analysis_file_name, + "analysis_file_description": "", + "analysis_file_abs_path": AnalysisNwbfile.get_abs_path( + analysis_file_name + ), + } self.insert1(key) @classmethod @@ -354,8 +363,8 @@ def get_abs_path(cls, analysis_nwb_file_name): def add_nwb_object( self, analysis_file_name, nwb_object, table_name="pandas_table" ): - # TODO: change to add_object with checks for object type and a name parameter, which should be specified if - # it is not an NWB container + # TODO: change to add_object with checks for object type and a name + # parameter, which should be specified if it is not an NWB container """Add an NWB object to the analysis file in the scratch area and returns the NWB object ID Parameters @@ -455,7 +464,10 @@ def add_units( metric_values = np.array( list(metrics[metric].values()) ) - # sort by unit_ids and apply that sorting to values to ensure that things go in the right order + + # sort by unit_ids and apply that sorting to values + # to ensure that things go in the right order + metric_values = metric_values[np.argsort(unit_ids)] logger.info( f"Adding metric {metric} : {metric_values}" @@ -667,4 +679,48 @@ def nightly_cleanup(): # during times when no other transactions are in progress. AnalysisNwbfile.cleanup(True) - # also check to see whether there are directories in the spikesorting folder with this + def log(self, analysis_file_name, table=None): + """Passthrough to the AnalysisNwbfileLog table. Avoid new imports.""" + if isinstance(analysis_file_name, dict): + analysis_file_name = analysis_file_name["analysis_file_name"] + time_delta = time() - self._creation_times[analysis_file_name] + file_size = Path(self.get_abs_path(analysis_file_name)).stat().st_size + + AnalysisNwbfileLog().log( + analysis_file_name=analysis_file_name, + time_delta=time_delta, + file_size=file_size, + table=table, + ) + + +@schema +class AnalysisNwbfileLog(dj.Manual): + definition = """ + id: int auto_increment + --- + dj_user: varchar(64) + -> AnalysisNwbfile + table=null: varchar(64) + timestamp = CURRENT_TIMESTAMP : timestamp + time_delta=null: float + file_size=null: float + """ + + def log(self, analysis_file_name, time_delta, file_size, table=None): + """Log the creation of an analysis NWB file. + + Parameters + ---------- + analysis_file_name : str + The name of the analysis NWB file. 
+ """ + self.insert1( + { + "dj_user": dj.config["database.user"], + "analysis_file_name": analysis_file_name, + "time_delta": time_delta, + "file_size": file_size, + "table": table, + } + ) diff --git a/src/spyglass/common/common_position.py b/src/spyglass/common/common_position.py index feb25a2ed..382e39069 100644 --- a/src/spyglass/common/common_position.py +++ b/src/spyglass/common/common_position.py @@ -91,7 +91,9 @@ class IntervalPositionInfo(SpyglassMixin, dj.Computed): def make(self, key): logger.info(f"Computing position for: {key}") - analysis_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) + analysis_file_name = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) raw_position = RawPosition.PosObject & key spatial_series = raw_position.fetch_nwb()[0]["raw_position"] @@ -118,6 +120,8 @@ def make(self, key): AnalysisNwbfile().add(key["nwb_file_name"], analysis_file_name) + AnalysisNwbfile().log(key, table=self.full_table_name) + self.insert1(key) @staticmethod diff --git a/src/spyglass/decoding/v0/clusterless.py b/src/spyglass/decoding/v0/clusterless.py index 6c0bcae3f..ee44075ee 100644 --- a/src/spyglass/decoding/v0/clusterless.py +++ b/src/spyglass/decoding/v0/clusterless.py @@ -135,6 +135,10 @@ class UnitMarks(SpyglassMixin, dj.Computed): """ def make(self, key): + # create a new AnalysisNwbfile and a timeseries for the marks and save + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) # get the list of mark parameters mark_param = (MarkParameters & key).fetch1() @@ -207,10 +211,6 @@ def make(self, key): timestamps, marks, mark_param["mark_param_dict"] ) - # create a new AnalysisNwbfile and a timeseries for the marks and save - key["analysis_file_name"] = AnalysisNwbfile().create( - key["nwb_file_name"] - ) nwb_object = pynwb.TimeSeries( name="marks", data=marks, @@ -222,6 +222,7 @@ def make(self, key): key["analysis_file_name"], nwb_object ) AnalysisNwbfile().add(key["nwb_file_name"], key["analysis_file_name"]) + AnalysisNwbfile().log(key, table=self.full_table_name) self.insert1(key) def fetch1_dataframe(self) -> pd.DataFrame: diff --git a/src/spyglass/decoding/v1/waveform_features.py b/src/spyglass/decoding/v1/waveform_features.py index 59a44efc1..1d1f0fe48 100644 --- a/src/spyglass/decoding/v1/waveform_features.py +++ b/src/spyglass/decoding/v1/waveform_features.py @@ -1,5 +1,6 @@ import os from itertools import chain +from time import time import datajoint as dj import numpy as np @@ -102,6 +103,7 @@ class UnitWaveformFeatures(SpyglassMixin, dj.Computed): """ def make(self, key): + AnalysisNwbfile()._creation_times["pre_create_time"] = time() # get the list of feature parameters params = (WaveformFeaturesParams & key).fetch1("params") @@ -390,4 +392,9 @@ def _write_waveform_features_to_nwb( units_object_id = nwbf.units.object_id io.write(nwbf) + AnalysisNwbfile().log( + analysis_nwb_file, + table="`decoding_waveform_features`.`__unit_waveform_features`", + ) + return analysis_nwb_file, units_object_id diff --git a/src/spyglass/lfp/analysis/v1/lfp_band.py b/src/spyglass/lfp/analysis/v1/lfp_band.py index c7357630c..d77148610 100644 --- a/src/spyglass/lfp/analysis/v1/lfp_band.py +++ b/src/spyglass/lfp/analysis/v1/lfp_band.py @@ -174,6 +174,10 @@ class LFPBandV1(SpyglassMixin, dj.Computed): """ def make(self, key): + # create the analysis nwb file to store the results. 
+ lfp_band_file_name = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) # get the NWB object with the lfp data; FIX: change to fetch with additional infrastructure lfp_key = {"merge_id": key["lfp_merge_id"]} lfp_object = (LFPOutput & lfp_key).fetch_nwb()[0]["lfp"] @@ -276,8 +280,6 @@ def make(self, key): ) return None - # create the analysis nwb file to store the results. - lfp_band_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) lfp_band_file_abspath = AnalysisNwbfile().get_abs_path( lfp_band_file_name ) @@ -362,6 +364,7 @@ def make(self, key): "previously saved lfp band times do not match current times" ) + AnalysisNwbfile().log(key, table=self.full_table_name) self.insert1(key) def fetch1_dataframe(self, *attrs, **kwargs): diff --git a/src/spyglass/lfp/v1/lfp.py b/src/spyglass/lfp/v1/lfp.py index 308bb14ea..097e67c23 100644 --- a/src/spyglass/lfp/v1/lfp.py +++ b/src/spyglass/lfp/v1/lfp.py @@ -58,6 +58,7 @@ class LFPV1(SpyglassMixin, dj.Computed): """ def make(self, key): + lfp_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) # logged # get the NWB object with the data nwbf_key = {"nwb_file_name": key["nwb_file_name"]} rawdata = (Raw & nwbf_key).fetch_nwb()[0]["raw"] @@ -127,8 +128,6 @@ def make(self, key): electrode_id_list = list(k["electrode_id"] for k in electrode_keys) electrode_id_list.sort() - lfp_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) - lfp_file_abspath = AnalysisNwbfile().get_abs_path(lfp_file_name) ( lfp_object_id, @@ -193,6 +192,7 @@ def make(self, key): orig_key["analysis_file_name"] = lfp_file_name orig_key["lfp_object_id"] = lfp_object_id LFPOutput.insert1(orig_key) + AnalysisNwbfile().log(key, table=self.full_table_name) def fetch1_dataframe(self, *attrs, **kwargs): nwb_lfp = self.fetch_nwb()[0] diff --git a/src/spyglass/linearization/v0/main.py b/src/spyglass/linearization/v0/main.py index f08694689..cfe3a02db 100644 --- a/src/spyglass/linearization/v0/main.py +++ b/src/spyglass/linearization/v0/main.py @@ -121,7 +121,7 @@ class IntervalLinearizedPosition(SpyglassMixin, dj.Computed): def make(self, key): logger.info(f"Computing linear position for: {key}") - key["analysis_file_name"] = AnalysisNwbfile().create( + key["analysis_file_name"] = AnalysisNwbfile().create( # logged key["nwb_file_name"] ) @@ -184,5 +184,7 @@ def make(self, key): self.insert1(key) + AnalysisNwbfile().log(key, table=self.full_table_name) + def fetch1_dataframe(self): return self.fetch_nwb()[0]["linearized_position"].set_index("time") diff --git a/src/spyglass/linearization/v1/main.py b/src/spyglass/linearization/v1/main.py index a45ffe03e..b9ca2347d 100644 --- a/src/spyglass/linearization/v1/main.py +++ b/src/spyglass/linearization/v1/main.py @@ -120,7 +120,7 @@ def make(self, key): position_nwb = PositionOutput().fetch_nwb( {"merge_id": key["pos_merge_id"]} )[0] - key["analysis_file_name"] = AnalysisNwbfile().create( + key["analysis_file_name"] = AnalysisNwbfile().create( # logged position_nwb["nwb_file_name"] ) position = np.asarray( @@ -181,5 +181,7 @@ def make(self, key): [orig_key], part_name=part_name, skip_duplicates=True ) + AnalysisNwbfile().log(key, table=self.full_table_name) + def fetch1_dataframe(self): return self.fetch_nwb()[0]["linearized_position"].set_index("time") diff --git a/src/spyglass/position/v1/position_dlc_centroid.py b/src/spyglass/position/v1/position_dlc_centroid.py index df2de1544..e989265da 100644 --- a/src/spyglass/position/v1/position_dlc_centroid.py +++ b/src/spyglass/position/v1/position_dlc_centroid.py @@ 
-141,6 +141,10 @@ def make(self, key): path=f"{output_dir.as_posix()}/log.log", print_console=False, ) as logger: + # Add to Analysis NWB file + analysis_file_name = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) logger.logger.info("-----------------------") logger.logger.info("Centroid Calculation") @@ -298,8 +302,6 @@ def make(self, key): description="video_frame_ind", comments="no comments", ) - # Add to Analysis NWB file - analysis_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) nwb_analysis_file = AnalysisNwbfile() key.update( { @@ -319,6 +321,7 @@ def make(self, key): ) self.insert1(key) logger.logger.info("inserted entry into DLCCentroid") + AnalysisNwbfile().log(key, table=self.full_table_name) def fetch1_dataframe(self): nwb_data = self.fetch_nwb()[0] diff --git a/src/spyglass/position/v1/position_dlc_orient.py b/src/spyglass/position/v1/position_dlc_orient.py index 48f7849be..e1e5c668b 100644 --- a/src/spyglass/position/v1/position_dlc_orient.py +++ b/src/spyglass/position/v1/position_dlc_orient.py @@ -85,6 +85,9 @@ class DLCOrientation(SpyglassMixin, dj.Computed): def make(self, key): # Get labels to smooth from Parameters table + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) cohort_entries = DLCSmoothInterpCohort.BodyPart & key pos_df = pd.concat( { @@ -130,9 +133,6 @@ def make(self, key): final_df = pd.DataFrame( orientation, columns=["orientation"], index=pos_df.index ) - key["analysis_file_name"] = AnalysisNwbfile().create( - key["nwb_file_name"] - ) spatial_series = (RawPosition() & key).fetch_nwb()[0]["raw_position"] orientation = pynwb.behavior.CompassDirection() orientation.create_spatial_series( @@ -155,6 +155,7 @@ def make(self, key): ) self.insert1(key) + AnalysisNwbfile().log(key, table=self.full_table_name) def fetch1_dataframe(self): nwb_data = self.fetch_nwb()[0] diff --git a/src/spyglass/position/v1/position_dlc_pose_estimation.py b/src/spyglass/position/v1/position_dlc_pose_estimation.py index 3a4f50eba..dfc6095a5 100644 --- a/src/spyglass/position/v1/position_dlc_pose_estimation.py +++ b/src/spyglass/position/v1/position_dlc_pose_estimation.py @@ -276,15 +276,15 @@ def make(self, key): idx = pd.IndexSlice for body_part, part_df in body_parts_df.items(): logger.logger.info("converting to cm") + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) part_df = convert_to_cm(part_df, meters_per_pixel) logger.logger.info("adding timestamps to DataFrame") part_df = add_timestamps( part_df, pos_time=pos_time, video_time=video_time ) key["bodypart"] = body_part - key["analysis_file_name"] = AnalysisNwbfile().create( - key["nwb_file_name"] - ) position = pynwb.behavior.Position() likelihood = pynwb.behavior.BehavioralTimeSeries() position.create_spatial_series( @@ -330,6 +330,7 @@ def make(self, key): analysis_file_name=key["analysis_file_name"], ) self.BodyPart.insert1(key) + AnalysisNwbfile().log(key, table=self.full_table_name) def fetch_dataframe(self, *attrs, **kwargs): entries = (self.BodyPart & self).fetch("KEY") diff --git a/src/spyglass/position/v1/position_dlc_position.py b/src/spyglass/position/v1/position_dlc_position.py index 4ca199241..436d890d5 100644 --- a/src/spyglass/position/v1/position_dlc_position.py +++ b/src/spyglass/position/v1/position_dlc_position.py @@ -167,6 +167,9 @@ def make(self, key): path=f"{output_dir.as_posix()}/log.log", print_console=False, ) as logger: + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + 
key["nwb_file_name"] + ) logger.logger.info("-----------------------") idx = pd.IndexSlice # Get labels to smooth from Parameters table @@ -224,9 +227,6 @@ def make(self, key): .fetch_nwb()[0]["dlc_pose_estimation_position"] .get_spatial_series() ) - key["analysis_file_name"] = AnalysisNwbfile().create( - key["nwb_file_name"] - ) # Add dataframe to AnalysisNwbfile nwb_analysis_file = AnalysisNwbfile() position = pynwb.behavior.Position() @@ -267,6 +267,7 @@ def make(self, key): ) self.insert1(key) logger.logger.info("inserted entry into DLCSmoothInterp") + AnalysisNwbfile().log(key, table=self.full_table_name) def fetch1_dataframe(self): nwb_data = self.fetch_nwb()[0] diff --git a/src/spyglass/position/v1/position_dlc_selection.py b/src/spyglass/position/v1/position_dlc_selection.py index 50da599f6..74354db31 100644 --- a/src/spyglass/position/v1/position_dlc_selection.py +++ b/src/spyglass/position/v1/position_dlc_selection.py @@ -57,6 +57,10 @@ class DLCPosV1(SpyglassMixin, dj.Computed): def make(self, key): orig_key = copy.deepcopy(key) + # Add to Analysis NWB file + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) key["pose_eval_result"] = self.evaluate_pose_estimation(key) pos_nwb = (DLCCentroid & key).fetch_nwb()[0] @@ -110,10 +114,6 @@ def make(self, key): comments=vid_frame_obj.comments, ) - # Add to Analysis NWB file - key["analysis_file_name"] = AnalysisNwbfile().create( - key["nwb_file_name"] - ) nwb_analysis_file = AnalysisNwbfile() key["orientation_object_id"] = nwb_analysis_file.add_nwb_object( key["analysis_file_name"], orientation @@ -138,6 +138,7 @@ def make(self, key): PositionOutput._merge_insert( [orig_key], part_name=part_name, skip_duplicates=True ) + AnalysisNwbfile().log(key, table=self.full_table_name) def fetch1_dataframe(self): nwb_data = self.fetch_nwb()[0] diff --git a/src/spyglass/position/v1/position_trodes_position.py b/src/spyglass/position/v1/position_trodes_position.py index 80f1cb700..1a422b86f 100644 --- a/src/spyglass/position/v1/position_trodes_position.py +++ b/src/spyglass/position/v1/position_trodes_position.py @@ -160,7 +160,9 @@ def make(self, key): logger.info(f"Computing position for: {key}") orig_key = copy.deepcopy(key) - analysis_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) + analysis_file_name = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) raw_position = RawPosition.PosObject & key spatial_series = raw_position.fetch_nwb()[0]["raw_position"] @@ -201,6 +203,7 @@ def make(self, key): PositionOutput._merge_insert( [orig_key], part_name=part_name, skip_duplicates=True ) + AnalysisNwbfile().log(key, table=self.full_table_name) @staticmethod def generate_pos_components(*args, **kwargs): diff --git a/src/spyglass/spikesorting/v0/spikesorting_curation.py b/src/spyglass/spikesorting/v0/spikesorting_curation.py index dab6ed740..fcfd15646 100644 --- a/src/spyglass/spikesorting/v0/spikesorting_curation.py +++ b/src/spyglass/spikesorting/v0/spikesorting_curation.py @@ -228,6 +228,7 @@ def save_sorting_nwb( units_object_id : str """ + analysis_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) sort_interval_valid_times = ( IntervalList & {"interval_list_name": sort_interval_list_name} @@ -248,7 +249,6 @@ def save_sorting_nwb( units_valid_times[unit_id] = sort_interval_valid_times units_sort_interval[unit_id] = [sort_interval] - analysis_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) object_ids = AnalysisNwbfile().add_units( analysis_file_name, units, @@ -268,6 
+268,9 @@ def save_sorting_nwb( else: units_object_id = object_ids[0] + AnalysisNwbfile().log( + analysis_file_name, table="`spikesorting_curation`.`curation`" + ) return analysis_file_name, units_object_id @@ -326,6 +329,9 @@ class Waveforms(SpyglassMixin, dj.Computed): """ def make(self, key): + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) recording = Curation.get_recording(key) if recording.get_num_segments() > 1: recording = si.concatenate_recordings([recording]) @@ -351,15 +357,13 @@ def make(self, key): **waveform_params, ) - key["analysis_file_name"] = AnalysisNwbfile().create( - key["nwb_file_name"] - ) object_id = AnalysisNwbfile().add_units_waveforms( key["analysis_file_name"], waveform_extractor=waveforms ) key["waveforms_object_id"] = object_id AnalysisNwbfile().add(key["nwb_file_name"], key["analysis_file_name"]) + AnalysisNwbfile().log(key, table=self.full_table_name) self.insert1(key) def load_waveforms(self, key: dict): @@ -512,6 +516,9 @@ class QualityMetrics(SpyglassMixin, dj.Computed): """ def make(self, key): + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) waveform_extractor = Waveforms().load_waveforms(key) qm = {} params = (MetricParameters & key).fetch1("metric_params") @@ -528,13 +535,11 @@ def make(self, key): logger.info(f"Computed all metrics: {qm}") self._dump_to_json(qm, key["quality_metrics_path"]) - key["analysis_file_name"] = AnalysisNwbfile().create( - key["nwb_file_name"] - ) key["object_id"] = AnalysisNwbfile().add_units_metrics( key["analysis_file_name"], metrics=qm ) AnalysisNwbfile().add(key["nwb_file_name"], key["analysis_file_name"]) + AnalysisNwbfile().log(key, table=self.full_table_name) self.insert1(key) @@ -928,17 +933,21 @@ class Unit(SpyglassMixin, dj.Part): """ def make(self, key): + AnalysisNwbfile()._creation_times["pre_create_time"] = time.time() unit_labels_to_remove = ["reject"] # check that the Curation has metrics metrics = (Curation & key).fetch1("quality_metrics") if metrics == {}: - Warning( - f"Metrics for Curation {key} should normally be calculated before insertion here" + logger.warning( + f"Metrics for Curation {key} should normally be calculated " + + "before insertion here" ) sorting = Curation.get_curated_sorting(key) unit_ids = sorting.get_unit_ids() - # Get the labels for the units, add only those units that do not have 'reject' or 'noise' labels + + # Get the labels for the units, add only those units that do not have + # 'reject' or 'noise' labels unit_labels = (Curation & key).fetch1("curation_labels") accepted_units = [] for unit_id in unit_ids: diff --git a/src/spyglass/spikesorting/v1/curation.py b/src/spyglass/spikesorting/v1/curation.py index ac9c78f2b..0d6a6dcb4 100644 --- a/src/spyglass/spikesorting/v1/curation.py +++ b/src/spyglass/spikesorting/v1/curation.py @@ -1,3 +1,4 @@ +from time import time from typing import Dict, List, Union import datajoint as dj @@ -79,6 +80,8 @@ def insert_curation( ------- curation_key : dict """ + AnalysisNwbfile()._creation_times["pre_create_time"] = time() + sort_query = cls & {"sorting_id": sorting_id} parent_curation_id = max(parent_curation_id, -1) if parent_curation_id == -1: @@ -337,6 +340,7 @@ def _write_sorting_to_nwb_with_curation( nwb_file_name = (SpikeSortingSelection & {"sorting_id": sorting_id}).fetch1( "nwb_file_name" ) + analysis_nwb_file = AnalysisNwbfile().create(nwb_file_name) # get sorting sorting_analysis_file_abs_path = AnalysisNwbfile.get_abs_path( @@ -365,7 +369,6 @@ def 
_write_sorting_to_nwb_with_curation( unit_ids = list(units_dict.keys()) # create new analysis nwb file - analysis_nwb_file = AnalysisNwbfile().create(nwb_file_name) analysis_nwb_file_abs_path = AnalysisNwbfile.get_abs_path(analysis_nwb_file) with pynwb.NWBHDF5IO( path=analysis_nwb_file_abs_path, @@ -422,6 +425,9 @@ def _write_sorting_to_nwb_with_curation( units_object_id = nwbf.units.object_id io.write(nwbf) + AnalysisNwbfile().log( + analysis_nwb_file, table="`spikesorting_v1_sorting`.`__spike_sorting`" + ) return analysis_nwb_file, units_object_id diff --git a/src/spyglass/spikesorting/v1/metric_curation.py b/src/spyglass/spikesorting/v1/metric_curation.py index 87c692d96..1519cdb3a 100644 --- a/src/spyglass/spikesorting/v1/metric_curation.py +++ b/src/spyglass/spikesorting/v1/metric_curation.py @@ -1,5 +1,6 @@ import os import uuid +from time import time from typing import Any, Dict, List, Union import datajoint as dj @@ -203,6 +204,7 @@ class MetricCuration(SpyglassMixin, dj.Computed): """ def make(self, key): + AnalysisNwbfile()._creation_times["pre_create_time"] = time() # FETCH nwb_file_name = ( SpikeSortingSelection * MetricCurationSelection & key @@ -523,12 +525,12 @@ def _write_metric_curation_to_nwb( object_id : str object_id of the units table in the analysis NWB file """ - - unit_ids = [int(i) for i in waveforms.sorting.get_unit_ids()] - # create new analysis nwb file analysis_nwb_file = AnalysisNwbfile().create(nwb_file_name) analysis_nwb_file_abs_path = AnalysisNwbfile.get_abs_path(analysis_nwb_file) + + unit_ids = [int(i) for i in waveforms.sorting.get_unit_ids()] + with pynwb.NWBHDF5IO( path=analysis_nwb_file_abs_path, mode="a", @@ -584,4 +586,8 @@ def _write_metric_curation_to_nwb( units_object_id = nwbf.units.object_id io.write(nwbf) + AnalysisNwbfile().log( + analysis_nwb_file, + table="`spikesorting_v1_metric_curation`.`__metric_curation`", + ) return analysis_nwb_file, units_object_id diff --git a/src/spyglass/spikesorting/v1/recording.py b/src/spyglass/spikesorting/v1/recording.py index 996611d9a..b3931d264 100644 --- a/src/spyglass/spikesorting/v1/recording.py +++ b/src/spyglass/spikesorting/v1/recording.py @@ -1,4 +1,5 @@ import uuid +from time import time from typing import Iterable, List, Optional, Tuple, Union import datajoint as dj @@ -250,6 +251,7 @@ class SpikeSortingRecording(SpyglassMixin, dj.Computed): """ def make(self, key): + AnalysisNwbfile()._creation_times["pre_create_time"] = time() # DO: # - get valid times for sort interval # - proprocess recording @@ -649,6 +651,10 @@ def _write_recording_to_nwb( "ProcessedElectricalSeries" ].object_id io.write(nwbfile) + AnalysisNwbfile().log( + analysis_nwb_file, + table="`spikesorting_v1_sorting`.`__spike_sorting_recording`", + ) return analysis_nwb_file, recording_object_id diff --git a/src/spyglass/spikesorting/v1/sorting.py b/src/spyglass/spikesorting/v1/sorting.py index 9e3d19d6a..30c886d94 100644 --- a/src/spyglass/spikesorting/v1/sorting.py +++ b/src/spyglass/spikesorting/v1/sorting.py @@ -1,3 +1,4 @@ +import os import tempfile import time import uuid @@ -22,7 +23,6 @@ _consolidate_intervals, ) from spyglass.utils import SpyglassMixin, logger -import os schema = dj.schema("spikesorting_v1_sorting") @@ -152,6 +152,7 @@ def make(self, key: dict): # - information about the recording # - artifact free intervals # - spike sorter and sorter params + AnalysisNwbfile()._creation_times["pre_create_time"] = time.time() recording_key = ( SpikeSortingRecording * SpikeSortingSelection & key @@ -404,4 +405,7 @@ def 
_write_sorting_to_nwb( ) units_object_id = nwbf.units.object_id io.write(nwbf) + AnalysisNwbfile().log( + analysis_nwb_file, table="`spikesorting_v1_curation`.`curation_v1`" + ) return analysis_nwb_file, units_object_id diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index 7b42088b4..02ccdb8a8 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -500,7 +500,7 @@ def _usage_table(self): return CautiousDelete() - def _log_use(self, start, merge_deletes=None, super_delete=False): + def _log_delete(self, start, merge_deletes=None, super_delete=False): """Log use of cautious_delete.""" if isinstance(merge_deletes, QueryExpression): merge_deletes = merge_deletes.fetch(as_dict=True) @@ -570,12 +570,12 @@ def cautious_delete(self, force_permission: bool = False, *args, **kwargs): self._commit_merge_deletes(merge_deletes, **kwargs) else: logger.info("Delete aborted.") - self._log_use(start) + self._log_delete(start) return super().delete(*args, **kwargs) # Additional confirm here - self._log_use(start=start, merge_deletes=merge_deletes) + self._log_delete(start=start, merge_deletes=merge_deletes) def cdel(self, force_permission=False, *args, **kwargs): """Alias for cautious_delete.""" @@ -589,7 +589,7 @@ def super_delete(self, warn=True, *args, **kwargs): """Alias for datajoint.table.Table.delete.""" if warn: logger.warning("!! Bypassing cautious_delete !!") - self._log_use(start=time(), super_delete=True) + self._log_delete(start=time(), super_delete=True) super().delete(*args, **kwargs) # ------------------------------- Export Log ------------------------------- From 664f0517adf7ea708d0598f368648902b1ba5ebf Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Mon, 22 Apr 2024 13:19:56 -0700 Subject: [PATCH 23/60] Log when file accessed (#941) * Add logging for any func that creates AnalysisNwbfile --- CHANGELOG.md | 11 ++-- CITATION.cff | 4 +- src/spyglass/common/common_nwbfile.py | 54 ++++++++++++++++--- src/spyglass/decoding/v1/waveform_features.py | 7 +-- .../spikesorting/v0/spikesorting_curation.py | 5 +- src/spyglass/spikesorting/v1/curation.py | 4 +- .../spikesorting/v1/metric_curation.py | 5 +- src/spyglass/spikesorting/v1/recording.py | 7 ++- src/spyglass/spikesorting/v1/sorting.py | 4 +- src/spyglass/utils/dj_helper_fn.py | 28 ++++++++++ src/spyglass/utils/dj_mixin.py | 1 + 11 files changed, 93 insertions(+), 37 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aad012b6b..9b4f579ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,6 @@ # Change Log -## [0.5.2] (Unreleased) - -### Release Notes - - +## [0.5.2] (April 22, 2024) ### Infrastructure @@ -20,14 +16,15 @@ - Prioritize datajoint filepath entry for defining abs_path of analysis nwbfile #918 - Fix potential duplicate entries in Merge part tables #922 -- Add logging of AnalysisNwbfile creation time and size #937 +- Add log of AnalysisNwbfile creation time, size, and access count #937, #941 ### Pipelines - Spikesorting - Update calls in v0 pipeline for spikeinterface>=0.99 #893 - Fix method type of `get_spike_times` #904 - - Add helper functions for restricting spikesorting results and linking to probe info #910 + - Add helper functions for restricting spikesorting results and linking to + probe info #910 - Decoding - Handle dimensions of clusterless `get_ahead_behind_distance` #904 - Fix improper handling of nwb file names with .strip #929 diff --git a/CITATION.cff b/CITATION.cff index c17cabb6c..6fc0e83aa 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ 
-166,5 +166,5 @@ keywords: - spike sorting - kachery license: MIT -version: 0.5.1 -date-released: '2024-03-07' +version: 0.5.2 +date-released: '2024-04-22' diff --git a/src/spyglass/common/common_nwbfile.py b/src/spyglass/common/common_nwbfile.py index ba6b12668..19700d3b3 100644 --- a/src/spyglass/common/common_nwbfile.py +++ b/src/spyglass/common/common_nwbfile.py @@ -693,21 +693,37 @@ def log(self, analysis_file_name, table=None): table=table, ) + def increment_access(self, keys, table=None): + """Passthrough to the AnalysisNwbfileLog table. Avoid new imports.""" + if not isinstance(keys, list): + key = [keys] + + for key in keys: + AnalysisNwbfileLog().increment_access(key, table=table) + @schema class AnalysisNwbfileLog(dj.Manual): definition = """ id: int auto_increment --- - dj_user: varchar(64) -> AnalysisNwbfile - table=null: varchar(64) - timestamp = CURRENT_TIMESTAMP : timestamp - time_delta=null: float - file_size=null: float + dj_user : varchar(64) # user who created the file + timestamp = CURRENT_TIMESTAMP : timestamp # when the file was created + table = null : varchar(64) # creating table + time_delta = null : float # how long it took to create + file_size = null : float # size of the file in bytes + accessed = 0 : int # n times accessed + unique index (analysis_file_name) """ - def log(self, analysis_file_name, time_delta, file_size, table=None): + def log( + self, + analysis_file_name=None, + time_delta=None, + file_size=None, + table=None, + ): """Log the creation of an analysis NWB file. Parameters @@ -724,3 +740,29 @@ def log(self, analysis_file_name, time_delta, file_size, table=None): "table": table, } ) + + def increment_access(self, key, table=None): + """Increment the accessed field for the given analysis file name. + + Parameters + ---------- + key : Union[str, dict] + The name of the analysis NWB file, or a key to the table. + table : str, optional + The table that created the file. 
+ """ + if isinstance(key, str): + key = {"analysis_file_name": key} + + if not (query := self & key): + self.log(**key, table=table) + entries = query.fetch(as_dict=True) + + inserts = [] + for entry in entries: + entry["accessed"] += 1 + if table and not entry.get("table"): + entry["table"] = table + inserts.append(entry) + + self.insert(inserts, replace=True) diff --git a/src/spyglass/decoding/v1/waveform_features.py b/src/spyglass/decoding/v1/waveform_features.py index 1d1f0fe48..4a999accd 100644 --- a/src/spyglass/decoding/v1/waveform_features.py +++ b/src/spyglass/decoding/v1/waveform_features.py @@ -166,6 +166,8 @@ def make(self, key): nwb_file_name, key["analysis_file_name"], ) + AnalysisNwbfile().log(key, table=self.full_table_name) + self.insert1(key) @staticmethod @@ -392,9 +394,4 @@ def _write_waveform_features_to_nwb( units_object_id = nwbf.units.object_id io.write(nwbf) - AnalysisNwbfile().log( - analysis_nwb_file, - table="`decoding_waveform_features`.`__unit_waveform_features`", - ) - return analysis_nwb_file, units_object_id diff --git a/src/spyglass/spikesorting/v0/spikesorting_curation.py b/src/spyglass/spikesorting/v0/spikesorting_curation.py index fcfd15646..e3c0a6fd6 100644 --- a/src/spyglass/spikesorting/v0/spikesorting_curation.py +++ b/src/spyglass/spikesorting/v0/spikesorting_curation.py @@ -268,9 +268,6 @@ def save_sorting_nwb( else: units_object_id = object_ids[0] - AnalysisNwbfile().log( - analysis_file_name, table="`spikesorting_curation`.`curation`" - ) return analysis_file_name, units_object_id @@ -1003,6 +1000,8 @@ def make(self, key): unit_ids=accepted_units, labels=labels, ) + + AnalysisNwbfile().log(key, table=self.full_table_name) self.insert1(key) # now add the units diff --git a/src/spyglass/spikesorting/v1/curation.py b/src/spyglass/spikesorting/v1/curation.py index 0d6a6dcb4..078076b51 100644 --- a/src/spyglass/spikesorting/v1/curation.py +++ b/src/spyglass/spikesorting/v1/curation.py @@ -128,6 +128,7 @@ def insert_curation( key, skip_duplicates=True, ) + AnalysisNwbfile().log(analysis_file_name, table=cls.full_table_name) return key @@ -425,9 +426,6 @@ def _write_sorting_to_nwb_with_curation( units_object_id = nwbf.units.object_id io.write(nwbf) - AnalysisNwbfile().log( - analysis_nwb_file, table="`spikesorting_v1_sorting`.`__spike_sorting`" - ) return analysis_nwb_file, units_object_id diff --git a/src/spyglass/spikesorting/v1/metric_curation.py b/src/spyglass/spikesorting/v1/metric_curation.py index 1519cdb3a..836de018d 100644 --- a/src/spyglass/spikesorting/v1/metric_curation.py +++ b/src/spyglass/spikesorting/v1/metric_curation.py @@ -276,6 +276,7 @@ def make(self, key): nwb_file_name, key["analysis_file_name"], ) + AnalysisNwbfile().log(key, table=self.full_table_name) self.insert1(key) @classmethod @@ -586,8 +587,4 @@ def _write_metric_curation_to_nwb( units_object_id = nwbf.units.object_id io.write(nwbf) - AnalysisNwbfile().log( - analysis_nwb_file, - table="`spikesorting_v1_metric_curation`.`__metric_curation`", - ) return analysis_nwb_file, units_object_id diff --git a/src/spyglass/spikesorting/v1/recording.py b/src/spyglass/spikesorting/v1/recording.py index b3931d264..43ccd5495 100644 --- a/src/spyglass/spikesorting/v1/recording.py +++ b/src/spyglass/spikesorting/v1/recording.py @@ -284,6 +284,9 @@ def make(self, key): (SpikeSortingRecordingSelection & key).fetch1("nwb_file_name"), key["analysis_file_name"], ) + AnalysisNwbfile().log( + recording_nwb_file_name, table=self.full_table_name + ) self.insert1(key) @classmethod @@ -651,10 
+654,6 @@ def _write_recording_to_nwb( "ProcessedElectricalSeries" ].object_id io.write(nwbfile) - AnalysisNwbfile().log( - analysis_nwb_file, - table="`spikesorting_v1_sorting`.`__spike_sorting_recording`", - ) return analysis_nwb_file, recording_object_id diff --git a/src/spyglass/spikesorting/v1/sorting.py b/src/spyglass/spikesorting/v1/sorting.py index 30c886d94..84a936eea 100644 --- a/src/spyglass/spikesorting/v1/sorting.py +++ b/src/spyglass/spikesorting/v1/sorting.py @@ -300,6 +300,7 @@ def make(self, key: dict): (SpikeSortingSelection & key).fetch1("nwb_file_name"), key["analysis_file_name"], ) + AnalysisNwbfile().log(key, table=self.full_table_name) self.insert1(key, skip_duplicates=True) @classmethod @@ -405,7 +406,4 @@ def _write_sorting_to_nwb( ) units_object_id = nwbf.units.object_id io.write(nwbf) - AnalysisNwbfile().log( - analysis_nwb_file, table="`spikesorting_v1_curation`.`curation_v1`" - ) return analysis_nwb_file, units_object_id diff --git a/src/spyglass/utils/dj_helper_fn.py b/src/spyglass/utils/dj_helper_fn.py index 44321e10a..7af1fb2b4 100644 --- a/src/spyglass/utils/dj_helper_fn.py +++ b/src/spyglass/utils/dj_helper_fn.py @@ -6,7 +6,9 @@ import datajoint as dj import numpy as np +from datajoint.user_tables import UserTable +from spyglass.utils.dj_chains import PERIPHERAL_TABLES from spyglass.utils.logging import logger from spyglass.utils.nwb_helper_fn import get_nwb_file @@ -110,6 +112,26 @@ def dj_replace(original_table, new_values, key_column, replace_column): return original_table +def get_fetching_table_from_stack(stack): + """Get all classes from a stack of tables.""" + classes = set() + for frame_info in stack: + locals_dict = frame_info.frame.f_locals + for obj in locals_dict.values(): + if not isinstance(obj, UserTable): + continue # skip non-tables + if (name := obj.full_table_name) in PERIPHERAL_TABLES: + continue # skip common_nwbfile tables + classes.add(name) + if len(classes) > 1: + logger.warn( + f"Multiple classes found in stack: {classes}. " + "Please submit a bug report with the snippet used." + ) + classes = None # predict only one but not sure, so return None + return next(iter(classes)) if classes else None + + def get_nwb_table(query_expression, tbl, attr_name, *attrs, **kwargs): """Get the NWB file name and path from the given DataJoint query. @@ -150,6 +172,11 @@ def get_nwb_table(query_expression, tbl, attr_name, *attrs, **kwargs): query_expression * tbl.proj(nwb2load_filepath=attr_name) ).fetch(file_name_str) + if which == "analysis": # log access of analysis files to log table + AnalysisNwbfile().increment_access( + nwb_files, table=get_fetching_table_from_stack(inspect.stack()) + ) + return nwb_files, file_path_fn @@ -185,6 +212,7 @@ def fetch_nwb(query_expression, nwb_master, *attrs, **kwargs): nwb_files, file_path_fn = get_nwb_table( query_expression, tbl, attr_name, *attrs, **kwargs ) + for file_name in nwb_files: file_path = file_path_fn(file_name) if not os.path.exists(file_path): # retrieve the file from kachery. diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index 02ccdb8a8..515a1ad1f 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -139,6 +139,7 @@ def fetch_nwb(self, *attrs, **kwargs): Additional logic support Export table logging. 
""" table, tbl_attr = self._nwb_table_tuple + if self.export_id and "analysis" in tbl_attr: tbl_pk = "analysis_file_name" fnames = (self * table).fetch(tbl_pk) From 96691c3b2a623b7350b6b2aee94d1bf03dcd3638 Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Mon, 22 Apr 2024 14:50:49 -0700 Subject: [PATCH 24/60] Fix bug on empty delete in merge table (#940) * fix bug on empty delete in merge table * update changelog * fix spelling --------- Co-authored-by: Chris Brozdowski --- CHANGELOG.md | 2 ++ src/spyglass/utils/dj_merge_tables.py | 13 +++++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b4f579ae..d6eda918e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,8 @@ - Prioritize datajoint filepath entry for defining abs_path of analysis nwbfile #918 - Fix potential duplicate entries in Merge part tables #922 +- Add logging of AnalysisNwbfile creation time and size #937 +- Fix error on empty delete call in merge table. #940 - Add log of AnalysisNwbfile creation time, size, and access count #937, #941 ### Pipelines diff --git a/src/spyglass/utils/dj_merge_tables.py b/src/spyglass/utils/dj_merge_tables.py index f94645ccf..2b8aab5ef 100644 --- a/src/spyglass/utils/dj_merge_tables.py +++ b/src/spyglass/utils/dj_merge_tables.py @@ -799,11 +799,16 @@ def merge_populate(self, source: str, keys=None): def delete(self, force_permission=False, *args, **kwargs): """Alias for cautious_delete, overwrites datajoint.table.Table.delete""" - for part in self.merge_get_part( - restriction=self.restriction, - multi_source=True, - return_empties=False, + if not ( + parts := self.merge_get_part( + restriction=self.restriction, + multi_source=True, + return_empties=False, + ) ): + return + + for part in parts: part.delete(force_permission=force_permission, *args, **kwargs) def super_delete(self, warn=True, *args, **kwargs): From e23d1fd8c69a114fe93cfd7094c6da714c7e6fa2 Mon Sep 17 00:00:00 2001 From: Eric Denovellis Date: Mon, 22 Apr 2024 15:05:47 -0700 Subject: [PATCH 25/60] Remove master restriction --- .github/workflows/test-package-build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-package-build.yml b/.github/workflows/test-package-build.yml index 0fb98e620..0098982cb 100644 --- a/.github/workflows/test-package-build.yml +++ b/.github/workflows/test-package-build.yml @@ -90,7 +90,7 @@ jobs: url: https://pypi.org/p/spyglass-neuro permissions: id-token: write # IMPORTANT: this permission is mandatory for trusted publishing - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && github.ref == 'refs/heads/master' + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') steps: - uses: actions/download-artifact@v3 with: From 2027abaa3483ca254de664dbf09ab208cf4a8369 Mon Sep 17 00:00:00 2001 From: Eric Denovellis Date: Thu, 25 Apr 2024 14:00:17 -0700 Subject: [PATCH 26/60] Add system requirements and typical installation time (#945) --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index b6aad10c4..42c0e0357 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,8 @@ Documentation can be found at - For installation instructions see - [https://lorenfranklab.github.io/spyglass/latest/installation/](https://lorenfranklab.github.io/spyglass/latest/installation/) +Typical installation time is: 5-10 minutes + ## Tutorials The tutorials for `spyglass` is currently in the form of Jupyter Notebooks and @@ -81,6 +83,16 @@ for 
contributing instructions found at - License and Copyright notice can be found at [https://lorenfranklab.github.io/spyglass/latest/LICENSE/](https://lorenfranklab.github.io/spyglass/latest/LICENSE/) +## System requirements + +Spyglass has been tested on Linux Ubuntu 20.04 and MacOS 10.15. It has not been tested on Windows and likely will not work. + +No specific hardware requirements are needed to run spyglass. However, the amount of data that can be stored and analyzed is limited by the available disk space and memory. GPUs are required for some of the analysis tools, such as DeepLabCut. + +See [pyproject.toml](pyproject.toml), [environment.yml](environment.yml), or [environment_dlc.yml](environment_dlc.yml) for software dependencies. + +See [spec-file.txt](https://github.com/LorenFrankLab/spyglass-demo/blob/main/spec-file/spec-file.txt) for the conda environment used in the demo. + ## Citation > Lee, K.H.\*, Denovellis, E.L.\*, Ly, R., Magland, J., Soules, J., Comrie, A.E., Gramling, D.P., Guidera, J.A., Nevers, R., Adenekan, P., Brozdowski, C., Bray, S., Monroe, E., Bak, J.H., Coulter, M.E., Sun, X., Broyles, E., Shin, D., Chiang, S., Holobetz, C., Tritt, A., Rübel, O., Nguyen, T., Yatsenko, D., Chu, J., Kemere, C., Garcia, S., Buccino, A., Frank, L.M., 2024. Spyglass: a data analysis framework for reproducible and shareable neuroscience research. bioRxiv. [10.1101/2024.01.25.577295](https://doi.org/10.1101/2024.01.25.577295). From 435b0f369a890962ac9453ac860b31cb9a63c21f Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Mon, 6 May 2024 12:58:50 -0700 Subject: [PATCH 27/60] Group part delete propagation (#899) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Create class for group parts to help propagate deletes * spelling * update changelog * Part delete edits (#946) * Add spyglass version to created analysis nwb files (#897) * Add sg version to created analysis nwb files * update changelog * Change existing source script to spyglass version (#900) * Add pynapple support (#898) * Preliminary code * Add retrieval of file names * Add get_nwb_table function * Update docstrings * Update CHANGELOG.md * Hot fixes for clusterless `get_ahead_behind_distance` and `get_spike_times` (#904) * Squeeze results * Make method and not class method * Update CHANGELOG.md * fix bugs in fetch_nwb (#913) * Check for entry in merge part table prior to insert (#922) * check for entry in merge part table prior to insert * update changelog * Kachery fixes (#918) * Prioritize datajoint filepath for getting analysis file abs_path * remove deprecated kachery tables * update changelog * fix lint --------- Co-authored-by: Samuel Bray Co-authored-by: Eric Denovellis * remove old tables from init (#925) * Fix improper uses of strip (#929) Strip will remove leading characters * Update CHANGELOG.md * Misc Issues (#903) * #892 * #885 * #879 * Partial address of #860 * Update Changelog * Partial solve of #886 - Ask import * Fix failing tests * Add note on order of inheritace * #933 * Could not replicate fill_nan error. Reverting except clause * Export logger (#875) * WIP: rebase Export process * WIP: revise doc * ✅ : Generate working export script * Cleanup: Expand notebook, migrate export process from graph class to export * Revert dj_chains related edits * Update changelog * Revise doc * Address review comments #875 * Remove walrus in eval * prevent log on preview * Fix arg order on fetch, iterate over restr * Add upstream analysis files during cascade. 
Address false positive fetch * Avoid regen file list on revisit node * Bump Export.Table.restr to mediumblob * Revise Export.Table uniqueness to include export_id * Spikesorting quality of life helpers (#910) * add utitlity function for finding spikesorting merge ids * add option to select v1 sorts that didn't go through artifact detection * add option to return merge keys as dicts for future restrictions * Add tool to get brain region and electrode info for a spikesorting merge id * update changelog * style cleanup * style cleanup * fix restriction bug for curation_id * account for change or radiu_um argument name in spikeinterface * only do joins with metric curastion tables if have relevant keys in the restriction * Update tutorial to use spikesorting merge table helper functions * fix spelling * Add logging of AnalysisNwbfile creation time and file size (#937) * Add logging for any func that creates AnalysisNwbfile * Migrate create to top of respective funcs * Use pathlib for file size. Bump creation time to top of in spikesort * Clear pre_create_time on create * get/del -> pop * Log when file accessed (#941) * Add logging for any func that creates AnalysisNwbfile * Fix bug on empty delete in merge table (#940) * fix bug on empty delete in merge table * update changelog * fix spelling --------- Co-authored-by: Chris Brozdowski * Remove master restriction * Part delete takes restriction from self --------- Co-authored-by: Samuel Bray Co-authored-by: Eric Denovellis Co-authored-by: Samuel Bray Co-authored-by: Eric Denovellis * Fix linting --------- Co-authored-by: Chris Brozdowski Co-authored-by: Eric Denovellis Co-authored-by: Samuel Bray Co-authored-by: Eric Denovellis --- CHANGELOG.md | 11 +++++++++++ src/spyglass/decoding/v1/clusterless.py | 4 ++-- src/spyglass/decoding/v1/core.py | 4 ++-- src/spyglass/spikesorting/analysis/v1/group.py | 4 ++-- src/spyglass/utils/__init__.py | 4 ++-- src/spyglass/utils/dj_mixin.py | 12 ++++++++++++ 6 files changed, 31 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d6eda918e..1b348f2dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Change Log +## [0.5.3] (Unreleased) + +### Release Notes + + + +### Infrastructure + +- Create class `SpyglassGroupPart` to aid delete propagations #899 + ## [0.5.2] (April 22, 2024) ### Infrastructure @@ -227,3 +237,4 @@ [0.5.0]: https://github.com/LorenFrankLab/spyglass/releases/tag/0.5.0 [0.5.1]: https://github.com/LorenFrankLab/spyglass/releases/tag/0.5.1 [0.5.2]: https://github.com/LorenFrankLab/spyglass/releases/tag/0.5.2 +[0.5.3]: https://github.com/LorenFrankLab/spyglass/releases/tag/0.5.3 diff --git a/src/spyglass/decoding/v1/clusterless.py b/src/spyglass/decoding/v1/clusterless.py index 1c7fa830c..d7ecc5ec2 100644 --- a/src/spyglass/decoding/v1/clusterless.py +++ b/src/spyglass/decoding/v1/clusterless.py @@ -32,7 +32,7 @@ ) # noqa: F401 from spyglass.position.position_merge import PositionOutput # noqa: F401 from spyglass.settings import config -from spyglass.utils import SpyglassMixin, logger +from spyglass.utils import SpyglassMixin, SpyglassMixinPart, logger schema = dj.schema("decoding_clusterless_v1") @@ -44,7 +44,7 @@ class UnitWaveformFeaturesGroup(SpyglassMixin, dj.Manual): waveform_features_group_name: varchar(80) """ - class UnitFeatures(SpyglassMixin, dj.Part): + class UnitFeatures(SpyglassMixinPart): definition = """ -> UnitWaveformFeaturesGroup -> UnitWaveformFeatures diff --git a/src/spyglass/decoding/v1/core.py 
b/src/spyglass/decoding/v1/core.py index 5705726ad..79828b7db 100644 --- a/src/spyglass/decoding/v1/core.py +++ b/src/spyglass/decoding/v1/core.py @@ -13,7 +13,7 @@ restore_classes, ) from spyglass.position.position_merge import PositionOutput # noqa: F401 -from spyglass.utils import SpyglassMixin +from spyglass.utils import SpyglassMixin, SpyglassMixinPart schema = dj.schema("decoding_core_v1") @@ -94,7 +94,7 @@ class PositionGroup(SpyglassMixin, dj.Manual): position_variables = NULL: longblob # list of position variables to decode """ - class Position(SpyglassMixin, dj.Part): + class Position(SpyglassMixinPart): definition = """ -> PositionGroup -> PositionOutput.proj(pos_merge_id='merge_id') diff --git a/src/spyglass/spikesorting/analysis/v1/group.py b/src/spyglass/spikesorting/analysis/v1/group.py index 3403ad0b5..1f20a4e11 100644 --- a/src/spyglass/spikesorting/analysis/v1/group.py +++ b/src/spyglass/spikesorting/analysis/v1/group.py @@ -6,7 +6,7 @@ from spyglass.common import Session # noqa: F401 from spyglass.spikesorting.spikesorting_merge import SpikeSortingOutput -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils.dj_mixin import SpyglassMixin, SpyglassMixinPart schema = dj.schema("spikesorting_group_v1") @@ -51,7 +51,7 @@ class SortedSpikesGroup(SpyglassMixin, dj.Manual): sorted_spikes_group_name: varchar(80) """ - class Units(SpyglassMixin, dj.Part): + class Units(SpyglassMixinPart): definition = """ -> master -> SpikeSortingOutput.proj(spikesorting_merge_id='merge_id') diff --git a/src/spyglass/utils/__init__.py b/src/spyglass/utils/__init__.py index 05f316598..9ebe86491 100644 --- a/src/spyglass/utils/__init__.py +++ b/src/spyglass/utils/__init__.py @@ -1,5 +1,5 @@ from spyglass.utils.dj_merge_tables import _Merge -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils.dj_mixin import SpyglassMixin, SpyglassMixinPart from spyglass.utils.logging import logger -__all__ = ["_Merge", "SpyglassMixin", "logger"] +__all__ = ["_Merge", "SpyglassMixin", "SpyglassMixinPart", "logger"] diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index 515a1ad1f..05f510193 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -737,3 +737,15 @@ def file_like(self, name=None, **kwargs): logger.error(f"No file-like field found in {self.full_table_name}") return return self & f"{attr} LIKE '%{name}%'" + + +class SpyglassMixinPart(SpyglassMixin, dj.Part): + """ + A part table for Spyglass Group tables. Assists in propagating + delete calls from upstreeam tables to downstream tables. + """ + + def delete(self, *args, **kwargs): + """Delete master and part entries.""" + restriction = self.restriction or True # for (tbl & restr).delete() + (self.master & restriction).delete(*args, **kwargs) From 35335fbc339d1b7fbfb64bc54947a7bcbf31ad62 Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Tue, 7 May 2024 15:35:40 -0700 Subject: [PATCH 28/60] Fix bug report template html (#955) * Fix bug report template html * Update changelog --- .github/ISSUE_TEMPLATE/bug_report.md | 2 +- CHANGELOG.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index ea044c956..829fd4590 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -18,7 +18,7 @@ Steps to reproduce the behavior: 3. Scroll down to '....' 4. See error -Error Stack +
Error Stack ```python # Paste the error stack trace here diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b348f2dc..3f09af8c0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ ### Infrastructure - Create class `SpyglassGroupPart` to aid delete propagations #899 +- Fix bug report template #955 ## [0.5.2] (April 22, 2024) From faf8a8014654b95c48b7786f5fba0ed9ff58e604 Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Wed, 8 May 2024 10:32:45 -0700 Subject: [PATCH 29/60] fix curation fetch bug in make function (#960) --- src/spyglass/spikesorting/v0/spikesorting_curation.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/spyglass/spikesorting/v0/spikesorting_curation.py b/src/spyglass/spikesorting/v0/spikesorting_curation.py index e3c0a6fd6..acdebe352 100644 --- a/src/spyglass/spikesorting/v0/spikesorting_curation.py +++ b/src/spyglass/spikesorting/v0/spikesorting_curation.py @@ -513,10 +513,13 @@ class QualityMetrics(SpyglassMixin, dj.Computed): """ def make(self, key): - key["analysis_file_name"] = AnalysisNwbfile().create( # logged + analysis_file_name = AnalysisNwbfile().create( # logged key["nwb_file_name"] ) waveform_extractor = Waveforms().load_waveforms(key) + key["analysis_file_name"] = ( + analysis_file_name # add to key here to prevent fetch errors + ) qm = {} params = (MetricParameters & key).fetch1("metric_params") for metric_name, metric_params in params.items(): From a508b57fc507d787c1a0b8279c0cc8d28212ee99 Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Fri, 10 May 2024 09:24:19 -0700 Subject: [PATCH 30/60] Fix tests/doc build (#967) * Fix errored test teardown * Docs build pin, docstring fixes * Update overview page * Update changelog * Fix 0.X -> 1.X, remove sponsor-only option --- CHANGELOG.md | 1 + docs/build-docs.sh | 2 +- docs/mkdocs.yml | 2 ++ docs/src/misc/index.md | 4 +++- pyproject.toml | 2 +- src/spyglass/common/common_interval.py | 4 ++-- src/spyglass/position/v1/dlc_reader.py | 11 ++++++++--- src/spyglass/settings.py | 18 +++++++++--------- tests/utils/test_mixin.py | 2 -- 9 files changed, 27 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f09af8c0..5e27b7bb9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - Create class `SpyglassGroupPart` to aid delete propagations #899 - Fix bug report template #955 +- Pin `mkdocstring-python` to `1.9.0`, fix existing docstrings. 
#967 ## [0.5.2] (April 22, 2024) diff --git a/docs/build-docs.sh b/docs/build-docs.sh index bb9fa154a..50d44f511 100755 --- a/docs/build-docs.sh +++ b/docs/build-docs.sh @@ -7,7 +7,7 @@ cp ./CHANGELOG.md ./docs/src/ cp ./LICENSE ./docs/src/LICENSE.md mkdir -p ./docs/src/notebooks -rm -r ./docs/src/notebooks/* +rm -fr ./docs/src/notebooks/* cp ./notebooks/*ipynb ./docs/src/notebooks/ cp ./notebooks/*md ./docs/src/notebooks/ mv ./docs/src/notebooks/README.md ./docs/src/notebooks/index.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 920b646a7..1aaaa437e 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -74,6 +74,7 @@ nav: - FigURL: misc/figurl_views.md - Session Groups: misc/session_groups.md - Insert Data: misc/insert_data.md + - Mixin: misc/mixin.md - Merge Tables: misc/merge_tables.md - Database Management: misc/database_management.md - Export: misc/export.md @@ -100,6 +101,7 @@ plugins: default_handler: python handlers: python: + paths: [src] options: members_order: source group_by_category: false diff --git a/docs/src/misc/index.md b/docs/src/misc/index.md index 9b3991cb6..b9971a81c 100644 --- a/docs/src/misc/index.md +++ b/docs/src/misc/index.md @@ -3,7 +3,9 @@ This folder contains miscellaneous supporting files documentation. - [Database Management](./database_management.md) +- [Export](./export.md) - [figurl Views](./figurl_views.md) -- [insert Data](./insert_data.md) +- [Insert Data](./insert_data.md) - [Merge Tables](./merge_tables.md) +- [Mixin Class](./mixin.md) - [Session Groups](./session_groups.md) diff --git a/pyproject.toml b/pyproject.toml index ffb8d0df6..28cc12633 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -91,7 +91,7 @@ docs = [ "mkdocs-jupyter", # Docs render notebooks "mkdocs-literate-nav", # Dynamic page list for API docs "mkdocs-material", # Docs theme - "mkdocstrings[python]", # Docs API docstrings + "mkdocstrings[python]<=1.9.0", # Docs API docstrings ] [tool.hatch.version] diff --git a/src/spyglass/common/common_interval.py b/src/spyglass/common/common_interval.py index 39c676f5a..66e82bda8 100644 --- a/src/spyglass/common/common_interval.py +++ b/src/spyglass/common/common_interval.py @@ -256,6 +256,8 @@ def consolidate_intervals(interval_list): def interval_list_intersect(interval_list1, interval_list2, min_length=0): """Finds the intersections between two interval lists + Each interval is (start time, stop time) + Parameters ---------- interval_list1 : np.array, (N,2) where N = number of intervals @@ -263,8 +265,6 @@ def interval_list_intersect(interval_list1, interval_list2, min_length=0): min_length : float, optional. Minimum length of intervals to include, default 0 - Each interval is (start time, stop time) - Returns ------- interval_list: np.array, (N,2) diff --git a/src/spyglass/position/v1/dlc_reader.py b/src/spyglass/position/v1/dlc_reader.py index 8d6c18c23..c2e56063f 100644 --- a/src/spyglass/position/v1/dlc_reader.py +++ b/src/spyglass/position/v1/dlc_reader.py @@ -161,10 +161,15 @@ def read_yaml(fullpath, filename="*"): Parameters ---------- - fullpath: String or pathlib path. Directory with yaml files - filename: String. Filename, no extension. Permits wildcards. + fullpath: Union[str, pathlib.Path] + Directory with yaml files + filename: str + Filename, no extension. Permits wildcards. 
- Returns filepath and contents as dict + Returns + ------- + tuple + filepath and contents as dict """ from deeplabcut.utils.auxiliaryfunctions import read_config diff --git a/src/spyglass/settings.py b/src/spyglass/settings.py index be2912c9d..d9d469bba 100644 --- a/src/spyglass/settings.py +++ b/src/spyglass/settings.py @@ -20,7 +20,7 @@ class SpyglassConfig: facilitate testing. """ - def __init__(self, base_dir: str = None, **kwargs): + def __init__(self, base_dir: str = None, **kwargs) -> None: """ Initializes a new instance of the class. @@ -103,7 +103,7 @@ def load_config( force_reload=False, on_startup: bool = False, **kwargs, - ): + ) -> None: """ Loads the configuration settings for the object. @@ -223,25 +223,25 @@ def load_config( return self._config - def _load_env_vars(self): + def _load_env_vars(self) -> dict: loaded_dict = {} for var, val in self.env_defaults.items(): loaded_dict[var] = os.getenv(var, val) return loaded_dict - def _set_env_with_dict(self, env_dict): + def _set_env_with_dict(self, env_dict) -> None: # NOTE: Kept for backwards compatibility. Should be removed in future # for custom paths. Keep self.env_defaults. for var, val in env_dict.items(): os.environ[var] = str(val) - def _mkdirs_from_dict_vals(self, dir_dict): + def _mkdirs_from_dict_vals(self, dir_dict) -> None: if self._debug_mode: return for dir_str in dir_dict.values(): Path(dir_str).mkdir(exist_ok=True) - def _set_dj_config_stores(self, check_match=True, set_stores=True): + def _set_dj_config_stores(self, check_match=True, set_stores=True) -> None: """ Checks dj.config['stores'] match resolved dirs. Ensures stores set. @@ -287,7 +287,7 @@ def _set_dj_config_stores(self, check_match=True, set_stores=True): return - def dir_to_var(self, dir: str, dir_type: str = "spyglass"): + def dir_to_var(self, dir: str, dir_type: str = "spyglass") -> str: """Converts a dir string to an env variable name.""" return f"{dir_type.upper()}_{dir.upper()}_DIR" @@ -300,7 +300,7 @@ def _generate_dj_config( database_port: int = 3306, database_use_tls: bool = True, **kwargs, - ): + ) -> dict: """Generate a datajoint configuration file. Parameters @@ -345,7 +345,7 @@ def save_dj_config( base_dir=None, set_password=True, **kwargs, - ): + ) -> None: """Set the dj.config parameters, set password, and save config to file. 
Parameters diff --git a/tests/utils/test_mixin.py b/tests/utils/test_mixin.py index ac5c74bfe..faa823c8e 100644 --- a/tests/utils/test_mixin.py +++ b/tests/utils/test_mixin.py @@ -15,8 +15,6 @@ class Mixin(SpyglassMixin, dj.Manual): yield Mixin - Mixin().drop_quick() - @pytest.mark.skipif(not VERBOSE, reason="No logging to test when quiet-spy.") def test_bad_prefix(caplog, dj_conn, Mixin): From 280406ba39b5be6d4255735d1805f1cfea4cee3a Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Fri, 10 May 2024 12:18:43 -0500 Subject: [PATCH 31/60] Pin `mkdocstrings-python<=1.9.0` --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 28cc12633..2de8b3244 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -91,7 +91,8 @@ docs = [ "mkdocs-jupyter", # Docs render notebooks "mkdocs-literate-nav", # Dynamic page list for API docs "mkdocs-material", # Docs theme - "mkdocstrings[python]<=1.9.0", # Docs API docstrings + "mkdocstrings[python]", # Docs API docstrings + "mkdocstrings-python<=1.9.0" # Pinned #976 ] [tool.hatch.version] From fcde4c7f77d213896cdbfdae308d7fe8baffa67f Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Fri, 10 May 2024 11:56:33 -0700 Subject: [PATCH 32/60] Fix relative pathing for mkdocstrings>=1.9.1 (#968) * Fix relative pathing for mkdocstrings>=1.9.1 * Update changelog --- CHANGELOG.md | 2 +- docs/mkdocs.yml | 2 +- docs/src/api/make_pages.py | 5 ++++- pyproject.toml | 1 - 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e27b7bb9..0644c3fc9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ - Create class `SpyglassGroupPart` to aid delete propagations #899 - Fix bug report template #955 -- Pin `mkdocstring-python` to `1.9.0`, fix existing docstrings. #967 +- Fix relative pathing for `mkdocstring-python=>1.9.1`. 
#967, #968 ## [0.5.2] (April 22, 2024) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 1aaaa437e..acec4f829 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -101,12 +101,12 @@ plugins: default_handler: python handlers: python: - paths: [src] options: members_order: source group_by_category: false line_length: 80 docstring_style: numpy + paths: [../src] - literate-nav: nav_file: navigation.md - exclude-search: diff --git a/docs/src/api/make_pages.py b/docs/src/api/make_pages.py index 6886d50f4..d324919ce 100644 --- a/docs/src/api/make_pages.py +++ b/docs/src/api/make_pages.py @@ -16,8 +16,11 @@ if path.stem in ignored_stems or "cython" in path.stem: continue rel_path = path.relative_to("src/spyglass") + + # parts[0] is the src directory, ignore as of mkdocstrings-python 1.9.1 + module_path = ".".join([p for p in path.with_suffix("").parts[1:]]) + with mkdocs_gen_files.open(f"api/{rel_path.with_suffix('')}.md", "w") as f: - module_path = ".".join([p for p in path.with_suffix("").parts]) print(f"::: {module_path}", file=f) nav[rel_path.parts] = f"{rel_path.with_suffix('')}.md" diff --git a/pyproject.toml b/pyproject.toml index 2de8b3244..ffb8d0df6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -92,7 +92,6 @@ docs = [ "mkdocs-literate-nav", # Dynamic page list for API docs "mkdocs-material", # Docs theme "mkdocstrings[python]", # Docs API docstrings - "mkdocstrings-python<=1.9.0" # Pinned #976 ] [tool.hatch.version] From 2f6634b740c0c2fef71108a24ce8e34cbef473f6 Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Fri, 10 May 2024 12:01:50 -0700 Subject: [PATCH 33/60] Long distance restrictions (#949) * initial commit for restrict_from_upstream * Add tests for RestrGraph * WIP: ABC for RestrGraph * WIP: ABC for RestrGraph 2 * WIP: ABC for RestrGraph 3 * WIP: Operator for 'find upstream key' * WIP: Handle all alias cases in _bridge_restr * WIP: Add tests * WIP: Cascade through merge tables * WIP: add docs * WIP: Revise tests * WIP: Add way to ban item from search * Revert pytest options * Fix failing tests * Bail on cascade if restr empty * Update src/spyglass/utils/dj_mixin.py Co-authored-by: Samuel Bray * Permit dict/list-of-dict restr on long-distance restrict --------- Co-authored-by: Sam Bray Co-authored-by: Eric Denovellis --- CHANGELOG.md | 1 + docs/src/misc/mixin.md | 61 +- src/spyglass/utils/dj_chains.py | 373 -------- src/spyglass/utils/dj_graph.py | 1194 ++++++++++++++++++++----- src/spyglass/utils/dj_helper_fn.py | 28 +- src/spyglass/utils/dj_merge_tables.py | 71 +- src/spyglass/utils/dj_mixin.py | 231 ++++- src/spyglass/utils/nwb_helper_fn.py | 2 +- tests/common/test_device.py | 2 +- tests/conftest.py | 365 +++++++- tests/container.py | 2 +- tests/lfp/conftest.py | 133 --- tests/lfp/test_lfp.py | 5 - tests/linearization/conftest.py | 142 --- tests/linearization/test_lin.py | 2 +- tests/utils/__init__.py | 0 tests/utils/conftest.py | 241 ++++- tests/utils/test_chains.py | 25 +- tests/utils/test_graph.py | 143 +++ tests/utils/test_mixin.py | 43 +- 20 files changed, 2057 insertions(+), 1007 deletions(-) delete mode 100644 src/spyglass/utils/dj_chains.py delete mode 100644 tests/linearization/conftest.py create mode 100644 tests/utils/__init__.py create mode 100644 tests/utils/test_graph.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 0644c3fc9..231e328d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - Create class `SpyglassGroupPart` to aid delete propagations #899 - Fix bug report template #955 +- Add long-distance restrictions via `<<` and `>>` 
operators. #943
 - Fix relative pathing for `mkdocstring-python=>1.9.1`. #967, #968

 ## [0.5.2] (April 22, 2024)

diff --git a/docs/src/misc/mixin.md b/docs/src/misc/mixin.md
index 747a12f9f..6b3884551 100644
--- a/docs/src/misc/mixin.md
+++ b/docs/src/misc/mixin.md
@@ -4,6 +4,7 @@ The Spyglass Mixin provides a way to centralize all Spyglass-specific
 functionalities that have been added to DataJoint tables. This includes...

 - Fetching NWB files
+- Long-distance restrictions.
 - Delete functionality, including permission checks and part/master pairs
 - Export logging. See [export doc](export.md) for more information.

@@ -11,16 +12,14 @@ To add this functionality to your own tables, simply inherit from the mixin:

 ```python
 import datajoint as dj
+
 from spyglass.utils import SpyglassMixin

-schema = dj.schema('my_schema')
+schema = dj.schema("my_schema")

-@schema
-class MyOldTable(dj.Manual):
-    pass

 @schema
-class MyNewTable(SpyglassMixin, dj.Manual):)
+class MyOldTable(dj.Manual):
     pass
 ```

@@ -44,6 +43,58 @@ should be fetched from `Nwbfile` or an analysis file should be fetched from
 `AnalysisNwbfile`. If neither is foreign-key-referenced, the function will
 refer to a `_nwb_table` attribute.

+## Long-Distance Restrictions
+
+In complicated pipelines like Spyglass, there are often tables that 'bury' their
+foreign keys as secondary keys. This is done to avoid having to pass a long list
+of foreign keys through the pipeline, potentially hitting SQL limits (see also
+[Merge Tables](./merge_tables.md)). This burying makes it difficult to restrict
+a given table by familiar attributes.
+
+Spyglass provides a function, `restrict_by`, to handle this. The function takes
+your restriction and checks parents/children until the restriction can be
+applied. Spyglass introduces `<<` as a shorthand for `restrict_by` an upstream
+key and `>>` as a shorthand for `restrict_by` a downstream key.
+
+```python
+from spyglass.example import AnyTable
+
+AnyTable >> 'downstream_attribute="value"'
+AnyTable << 'upstream_attribute="value"'
+
+# Equivalent to
+AnyTable.restrict_by('upstream_attribute="value"', direction="up")
+AnyTable.restrict_by('downstream_attribute="value"', direction="down")
+```
+
+Some caveats to this function:
+
+1. 'Peripheral' tables, like `IntervalList` and `AnalysisNwbfile`, make it hard
+   to determine the correct parent/child relationship and have been removed
+   from this search.
+2. This function will raise an error if it attempts to check a table that has
+   not been imported into the current namespace. It is best used for exploring
+   and debugging, not for production code.
+3. It's hard to determine the attributes in a mixed dictionary/string
+   restriction. If you are having trouble, try using a pure string
+   restriction.
+4. The most direct path to your restriction may not be the path taken,
+   especially when using Merge Tables. When the result is empty, see the
+   warning about the path used. Then, ban tables from the search to force a
+   different path.
+
+```python
+my_table = MyTable()  # must be instantiated
+my_table.ban_search_table(UnwantedTable1)
+my_table.ban_search_table([UnwantedTable2, UnwantedTable3])
+my_table.unban_search_table(UnwantedTable3)
+my_table.see_banned_tables()
+
+my_table << my_restriction
+```
+
+When providing a restriction of the parent, use 'up' direction. When providing a
+restriction of the child, use 'down' direction.
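+
+As a concrete sketch (the table and attribute names below are placeholders, not
+real Spyglass tables), restricting a deeply nested table by a key that only
+exists upstream might look like:
+
+```python
+from spyglass.example import DeepDownstreamTable  # hypothetical table
+
+# Both lines are equivalent: apply an upstream attribute to this table.
+restricted = DeepDownstreamTable() << 'nwb_file_name="my_session_.nwb"'
+restricted = DeepDownstreamTable().restrict_by(
+    'nwb_file_name="my_session_.nwb"', direction="up"
+)
+```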
+ ## Delete Functionality The mixin overrides the default `delete` function to provide two additional diff --git a/src/spyglass/utils/dj_chains.py b/src/spyglass/utils/dj_chains.py deleted file mode 100644 index fe9cebc02..000000000 --- a/src/spyglass/utils/dj_chains.py +++ /dev/null @@ -1,373 +0,0 @@ -from collections import OrderedDict -from functools import cached_property -from typing import List, Union - -import datajoint as dj -import networkx as nx -from datajoint.expression import QueryExpression -from datajoint.table import Table -from datajoint.utils import get_master, to_camel_case - -from spyglass.utils.dj_merge_tables import RESERVED_PRIMARY_KEY as MERGE_PK -from spyglass.utils.logging import logger - -# Tables that should be excluded from the undirected graph when finding paths -# to maintain valid joins. -PERIPHERAL_TABLES = [ - "`common_interval`.`interval_list`", - "`common_nwbfile`.`__analysis_nwbfile_kachery`", - "`common_nwbfile`.`__nwbfile_kachery`", - "`common_nwbfile`.`analysis_nwbfile_kachery_selection`", - "`common_nwbfile`.`analysis_nwbfile_kachery`", - "`common_nwbfile`.`analysis_nwbfile`", - "`common_nwbfile`.`kachery_channel`", - "`common_nwbfile`.`nwbfile_kachery_selection`", - "`common_nwbfile`.`nwbfile_kachery`", - "`common_nwbfile`.`nwbfile`", -] - - -class TableChains: - """Class for representing chains from parent to Merge table via parts. - - Functions as a plural version of TableChain, allowing a single `join` - call across all chains from parent -> Merge table. - - Attributes - ---------- - parent : Table - Parent or origin of chains. - child : Table - Merge table or destination of chains. - connection : datajoint.Connection, optional - Connection to database used to create FreeTable objects. Defaults to - parent.connection. - part_names : List[str] - List of full table names of child parts. - chains : List[TableChain] - List of TableChain objects for each part in child. - has_link : bool - Cached attribute to store whether parent is linked to child via any of - child parts. False if (a) child is not in parent.descendants or (b) - nx.NetworkXNoPath is raised by nx.shortest_path for all chains. - - Methods - ------- - __init__(parent, child, connection=None) - Initialize TableChains with parent and child tables. - __repr__() - Return full representation of chains. - Multiline parent -> child for each chain. - __len__() - Return number of chains with links. - __getitem__(index: Union[int, str]) - Return TableChain object at index, or use substring of table name. - join(restriction: str = None) - Return list of joins for each chain in self.chains. 
- """ - - def __init__(self, parent, child, connection=None): - self.parent = parent - self.child = child - self.connection = connection or parent.connection - parts = child.parts(as_objects=True) - self.part_names = [part.full_table_name for part in parts] - self.chains = [TableChain(parent, part) for part in parts] - self.has_link = any([chain.has_link for chain in self.chains]) - - def __repr__(self): - return "\n".join([str(chain) for chain in self.chains]) - - def __len__(self): - return len([c for c in self.chains if c.has_link]) - - @property - def max_len(self): - """Return length of longest chain.""" - return max([len(chain) for chain in self.chains]) - - def __getitem__(self, index: Union[int, str]): - """Return FreeTable object at index.""" - if isinstance(index, str): - for i, part in enumerate(self.part_names): - if index in part: - return self.chains[i] - return self.chains[index] - - def join(self, restriction=None) -> List[QueryExpression]: - """Return list of joins for each chain in self.chains.""" - restriction = restriction or self.parent.restriction or True - joins = [] - for chain in self.chains: - if joined := chain.join(restriction): - joins.append(joined) - return joins - - -class TableChain: - """Class for representing a chain of tables. - - A chain is a sequence of tables from parent to child identified by - networkx.shortest_path. Parent -> Merge should use TableChains instead to - handle multiple paths to the respective parts of the Merge table. - - Attributes - ---------- - parent : Table - Parent or origin of chain. - child : Table - Child or destination of chain. - _connection : datajoint.Connection, optional - Connection to database used to create FreeTable objects. Defaults to - parent.connection. - _link_symbol : str - Symbol used to represent the link between parent and child. Hardcoded - to " -> ". - has_link : bool - Cached attribute to store whether parent is linked to child. False if - child is not in parent.descendants or nx.NetworkXNoPath is raised by - nx.shortest_path. - link_type : str - 'directed' or 'undirected' based on whether path is found with directed - or undirected graph. None if no path is found. - graph : nx.DiGraph - Directed graph of parent's dependencies from datajoint.connection. - names : List[str] - List of full table names in chain. - objects : List[dj.FreeTable] - List of FreeTable objects for each table in chain. - attr_maps : List[dict] - List of attribute maps for each link in chain. - path : OrderedDict[str, Dict[str, Union[dj.FreeTable,dict]]] - Dictionary of full table names in chain. Keys are self.names - Values are a dict of free_table (self.objects) and - attr_map (dict of new_name: old_name, self.attr_map). - - Methods - ------- - __str__() - Return string representation of chain: parent -> child. - __repr__() - Return full representation of chain: parent -> {links} -> child. - __len__() - Return number of tables in chain. - __getitem__(index: Union[int, str]) - Return FreeTable object at index, or use substring of table name. - find_path(directed=True) - Returns path OrderedDict of full table names in chain. If directed is - True, uses directed graph. If False, uses undirected graph. Undirected - excludes PERIPHERAL_TABLES like interval_list, nwbfile, etc. to maintain - valid joins. - join(restriction: str = None) - Return join of tables in chain with restriction applied to parent. 
- """ - - def __init__(self, parent: Table, child: Table, connection=None): - self._connection = connection or parent.connection - self.graph = self._connection.dependencies - self.graph.load() - - if ( # if child is a merge table - get_master(child.full_table_name) == "" - and MERGE_PK in child.heading.names - ): - raise TypeError("Child is a merge table. Use TableChains instead.") - - self._link_symbol = " -> " - self.parent = parent - self.child = child - self.link_type = None - self._searched = False - - if child.full_table_name not in self.graph.nodes: - logger.warning( - "Can't find item in graph. Try importing: " - + f"{child.full_table_name}" - ) - self._searched = True - - def __str__(self): - """Return string representation of chain: parent -> child.""" - if not self.has_link: - return "No link" - return ( - to_camel_case(self.parent.table_name) - + self._link_symbol - + to_camel_case(self.child.table_name) - ) - - def __repr__(self): - """Return full representation of chain: parent -> {links} -> child.""" - if not self.has_link: - return "No link" - return "Chain: " + self._link_symbol.join( - [t.table_name for t in self.objects] - ) - - def __len__(self): - """Return number of tables in chain.""" - if not self.has_link: - return 0 - return len(self.names) - - def __getitem__(self, index: Union[int, str]) -> dj.FreeTable: - """Return FreeTable object at index.""" - if not self.has_link: - return None - if isinstance(index, str): - for i, name in enumerate(self.names): - if index in name: - return self.objects[i] - return self.objects[index] - - @property - def has_link(self) -> bool: - """Return True if parent is linked to child. - - If not searched, search for path. If searched and no link is found, - return False. If searched and link is found, return True. - """ - if not self._searched: - _ = self.path - return self.link_type is not None - - def pk_link(self, src, trg, data) -> float: - """Return 1 if data["primary"] else float("inf"). - - Currently unused. Preserved for future debugging. shortest_path accepts - an option weight callable parameter. - nx.shortest_path(G, source, target,weight=pk_link) - """ - return 1 if data["primary"] else float("inf") - - def find_path(self, directed=True) -> OrderedDict: - """Return list of full table names in chain. - - Parameters - ---------- - directed : bool, optional - If True, use directed graph. If False, use undirected graph. - Defaults to True. Undirected permits paths to traverse from merge - part-parent -> merge part -> merge table. Undirected excludes - PERIPHERAL_TABLES like interval_list, nwbfile, etc. - - Returns - ------- - OrderedDict - Dictionary of full table names in chain. Keys are full table names. - Values are free_table (dj.FreeTable representation) and attr_map - (dict of new_name: old_name). Attribute maps on the table upstream - of an alias node that can be used in .proj(). Returns None if no - path is found. - - Ignores numeric table names in paths, which are - 'gaps' or alias nodes in the graph. See datajoint.Diagram._make_graph - source code for comments on alias nodes. 
- """ - source, target = self.parent.full_table_name, self.child.full_table_name - if not directed: - self.graph = self.graph.to_undirected() - self.graph.remove_nodes_from(PERIPHERAL_TABLES) - try: - path = nx.shortest_path(self.graph, source, target) - except nx.NetworkXNoPath: - return None - except nx.NodeNotFound: - self._searched = True - return None - - ret = OrderedDict() - prev_table = None - for i, table in enumerate(path): - if table.isnumeric(): # get proj() attribute map for alias node - if not prev_table: - raise ValueError("Alias node found without prev table.") - try: - attr_map = self.graph[table][prev_table]["attr_map"] - except KeyError: # Why is this only DLCCentroid?? - attr_map = self.graph[prev_table][table]["attr_map"] - ret[prev_table]["attr_map"] = attr_map - else: - free_table = dj.FreeTable(self._connection, table) - ret[table] = {"free_table": free_table, "attr_map": {}} - prev_table = table - return ret - - @cached_property - def path(self) -> OrderedDict: - """Return list of full table names in chain.""" - if self._searched and not self.has_link: - return None - - link = None - if link := self.find_path(directed=True): - self.link_type = "directed" - elif link := self.find_path(directed=False): - self.link_type = "undirected" - self._searched = True - - return link - - @cached_property - def names(self) -> List[str]: - """Return list of full table names in chain.""" - if not self.has_link: - return None - return list(self.path.keys()) - - @cached_property - def objects(self) -> List[dj.FreeTable]: - """Return list of FreeTable objects for each table in chain. - - Unused. Preserved for future debugging. - """ - if not self.has_link: - return None - return [v["free_table"] for v in self.path.values()] - - @cached_property - def attr_maps(self) -> List[dict]: - """Return list of attribute maps for each table in chain. - - Unused. Preserved for future debugging. - """ - if not self.has_link: - return None - return [v["attr_map"] for v in self.path.values()] - - def join( - self, restriction: str = None, reverse_order: bool = False - ) -> dj.expression.QueryExpression: - """Return join of tables in chain with restriction applied to parent. - - Parameters - ---------- - restriction : str, optional - Restriction to apply to first table in the order. - Defaults to self.parent.restriction. - reverse_order : bool, optional - If True, join tables in reverse order. Defaults to False. - """ - if not self.has_link: - return None - - restriction = restriction or self.parent.restriction or True - path = ( - OrderedDict(reversed(self.path.items())) - if reverse_order - else self.path - ).copy() - - _, first_val = path.popitem(last=False) - join = first_val["free_table"] & restriction - for i, val in enumerate(path.values()): - attr_map, free_table = val["attr_map"], val["free_table"] - try: - join = (join.proj() * free_table).proj(**attr_map) - except dj.DataJointError as e: - attribute = str(e).split("attribute ")[-1] - logger.error( - f"{str(self)} at {free_table.table_name} with {attribute}" - ) - return None - return join diff --git a/src/spyglass/utils/dj_graph.py b/src/spyglass/utils/dj_graph.py index 59e7497d5..5bf3d25d0 100644 --- a/src/spyglass/utils/dj_graph.py +++ b/src/spyglass/utils/dj_graph.py @@ -3,79 +3,154 @@ NOTE: read `ft` as FreeTable and `restr` as restriction. 
""" -from typing import Dict, List, Union +from abc import ABC, abstractmethod +from collections.abc import KeysView +from enum import Enum +from functools import cached_property +from itertools import chain as iter_chain +from typing import Any, Dict, List, Set, Tuple, Union -from datajoint import FreeTable +import datajoint as dj +from datajoint import FreeTable, Table from datajoint.condition import make_condition -from datajoint.table import Table +from datajoint.dependencies import unite_master_parts +from datajoint.utils import get_master, to_camel_case +from networkx import ( + NetworkXNoPath, + NodeNotFound, + all_simple_paths, + shortest_path, +) +from networkx.algorithms.dag import topological_sort from tqdm import tqdm -from spyglass.common import AnalysisNwbfile from spyglass.utils import logger -from spyglass.utils.dj_helper_fn import unique_dicts +from spyglass.utils.dj_helper_fn import ( + PERIPHERAL_TABLES, + fuzzy_get, + unique_dicts, +) +from spyglass.utils.dj_merge_tables import is_merge_table -class RestrGraph: - def __init__( - self, - seed_table: Table, - table_name: str = None, - restriction: str = None, - leaves: List[Dict[str, str]] = None, - verbose: bool = False, - **kwargs, - ): - """Use graph to cascade restrictions up from leaves to all ancestors. +class Direction(Enum): + """Cascade direction enum. Calling Up returns True. Inverting flips.""" + + UP = "up" + DOWN = "down" + NONE = None + + def __str__(self): + return self.value + + def __invert__(self) -> "Direction": + """Invert the direction.""" + if self.value is None: + logger.warning("Inverting NONE direction") + return Direction.NONE + return Direction.UP if self.value == "down" else Direction.DOWN + + def __bool__(self) -> bool: + """Return True if direction is not None.""" + return self.value is not None + + +class AbstractGraph(ABC): + """Abstract class for graph traversal and restriction application. + + Inherited by... + - RestrGraph: Cascade restriction(s) through a graph + - TableChain: Takes parent and child nodes, finds the shortest path, + and applies a restriction across the path. If either parent or child + is a merge table, use TableChains instead. If either parent or child + are not provided, search_restr is required to find the path to the + missing table. + + Methods + ------- + cascade: Abstract method implemented by child classes + cascade1: Cascade a restriction up/down the graph, recursively + + Properties + ---------- + all_ft: Get all FreeTables for visited nodes with restrictions applied. + as_dict: Get visited nodes as a list of dictionaries of + {table_name: restriction} + """ + + def __init__(self, seed_table: Table, verbose: bool = False, **kwargs): + """Initialize graph and connection. Parameters ---------- seed_table : Table Table to use to establish connection and graph - table_name : str, optional - Table name of single leaf, default None - restriction : str, optional - Restriction to apply to leaf. default None - leaves : Dict[str, str], optional - List of dictionaries with keys table_name and restriction. One - entry per leaf node. Default None. verbose : bool, optional Whether to print verbose output. Default False """ - + self.seed_table = seed_table self.connection = seed_table.connection + + # Undirected graph may not be needed, but adding FT to the graph + # prevents `to_undirected` from working. If using undirected, remove + # PERIPHERAL_TABLES from the graph. 
self.graph = seed_table.connection.dependencies self.graph.load() self.verbose = verbose - self.cascaded = False - self.ancestors = set() - self.visited = set() self.leaves = set() - self.analysis_pk = AnalysisNwbfile().primary_key + self.visited = set() + self.to_visit = set() + self.no_visit = set() + self.cascaded = False - if table_name and restriction: - self.add_leaf(table_name, restriction) - if leaves: - self.add_leaves(leaves, show_progress=verbose) + # --------------------------- Abstract Methods --------------------------- - def __repr__(self): - l_str = ",\n\t".join(self.leaves) + "\n" if self.leaves else "" - processed = "Cascaded" if self.cascaded else "Uncascaded" - return f"{processed} RestrictionGraph(\n\t{l_str})" + @abstractmethod + def cascade(self): + """Cascade restrictions through graph.""" + raise NotImplementedError("Child class mut implement `cascade` method") - @property - def all_ft(self): - """Get restricted FreeTables from all visited nodes.""" - self.cascade() - return [self._get_ft(table, with_restr=True) for table in self.visited] + # ---------------------------- Logging Helpers ---------------------------- - @property - def leaf_ft(self): - """Get restricted FreeTables from graph leaves.""" - return [self._get_ft(table, with_restr=True) for table in self.leaves] + def _log_truncate(self, log_str: str, max_len: int = 80): + """Truncate log lines to max_len and print if verbose.""" + if not self.verbose: + return + logger.info( + log_str[:max_len] + "..." if len(log_str) > max_len else log_str + ) + + def _camel(self, table): + """Convert table name(s) to camel case.""" + if isinstance(table, KeysView): + table = list(table) + if not isinstance(table, list): + table = [table] + ret = [to_camel_case(t.split(".")[-1].strip("`")) for t in table] + return ret[0] if len(ret) == 1 else ret - def _get_node(self, table): + def _print_restr(self): + """Print restrictions for debugging.""" + for table in self.visited: + if restr := self._get_restr(table): + logger.info(f"{table}: {restr}") + + # ------------------------------ Graph Nodes ------------------------------ + + def _ensure_name(self, table: Union[str, Table] = None) -> str: + """Ensure table is a string.""" + if table is None: + return None + if isinstance(table, str): + return table + if isinstance(table, list): + return [self._ensure_name(t) for t in table] + return getattr(table, "full_table_name", None) + + def _get_node(self, table: Union[str, Table]): """Get node from graph.""" + table = self._ensure_name(table) if not (node := self.graph.nodes.get(table)): raise ValueError( f"Table {table} not found in graph." @@ -83,31 +158,47 @@ def _get_node(self, table): ) return node - def _set_node(self, table, attr="ft", value=None): + def _set_node(self, table, attr: str = "ft", value: Any = None): """Set attribute on node. General helper for various attributes.""" _ = self._get_node(table) # Ensure node exists self.graph.nodes[table][attr] = value - def _get_ft(self, table, with_restr=False): - """Get FreeTable from graph node. If one doesn't exist, create it.""" - table = table if isinstance(table, str) else table.full_table_name - restr = self._get_restr(table) if with_restr else True - if ft := self._get_node(table).get("ft"): - return ft & restr - ft = FreeTable(self.connection, table) - self._set_node(table, "ft", ft) - return ft & restr + def _get_edge(self, child: str, parent: str) -> Tuple[bool, Dict[str, str]]: + """Get edge data between child and parent. + + Used as a fallback for _bridge_restr. 
Required for Maser/Part links to + temporarily flip direction. + + Returns + ------- + Tuple[bool, Dict[str, str]] + Tuple of boolean indicating direction and edge data. True if child + is child of parent. + """ + child = self._ensure_name(child) + parent = self._ensure_name(parent) + + if edge := self.graph.get_edge_data(parent, child): + return False, edge + elif edge := self.graph.get_edge_data(child, parent): + return True, edge + + # Handle alias nodes. `shortest_path` doesn't work with aliases + p1 = all_simple_paths(self.graph, child, parent) + p2 = all_simple_paths(self.graph, parent, child) + paths = [p for p in iter_chain(p1, p2)] # list for error handling + for path in paths: # Ignore long and non-alias paths + if len(path) > 3 or (len(path) > 2 and not path[1].isnumeric()): + continue + return self._get_edge(path[0], path[1]) + + raise ValueError(f"{child} -> {parent} not direct path: {paths}") def _get_restr(self, table): """Get restriction from graph node.""" - table = table if isinstance(table, str) else table.full_table_name - return self._get_node(table).get("restr", "False") - - def _get_files(self, table): - """Get analysis files from graph node.""" - return self._get_node(table).get("files", []) + return self._get_node(self._ensure_name(table)).get("restr") - def _set_restr(self, table, restriction): + def _set_restr(self, table, restriction, replace=False): """Add restriction to graph node. If one exists, merge with new.""" ft = self._get_ft(table) restriction = ( # Convert to condition if list or dict @@ -115,9 +206,9 @@ def _set_restr(self, table, restriction): if not isinstance(restriction, str) else restriction ) - # orig_restr = restriction - if existing := self._get_restr(table): - if existing == restriction: + existing = self._get_restr(table) + if not replace and existing: + if restriction == existing: return join = ft & [existing, restriction] if len(join) == len(ft & existing): @@ -126,168 +217,337 @@ def _set_restr(self, table, restriction): ft, unique_dicts(join.fetch("KEY", as_dict=True)), set() ) - # if table == "`spikesorting_merge`.`spike_sorting_output`": - # __import__("pdb").set_trace() - self._set_node(table, "restr", restriction) - def get_restr_ft(self, table: Union[int, str]): - """Get restricted FreeTable from graph node. + def _get_ft(self, table, with_restr=False): + """Get FreeTable from graph node. If one doesn't exist, create it.""" + table = self._ensure_name(table) + if with_restr: + if not (restr := self._get_restr(table) or False): + self._log_truncate(f"No restriction for {table}") + else: + restr = True - Currently used. May be useful for debugging. + if not (ft := self._get_node(table).get("ft")): + ft = FreeTable(self.connection, table) + self._set_node(table, "ft", ft) - Parameters - ---------- - table : Union[int, str] - Table name or index in visited set - """ - if isinstance(table, int): - table = list(self.visited)[table] - return self._get_ft(table, with_restr=True) + return ft & restr - def _log_truncate(self, log_str, max_len=80): - """Truncate log lines to max_len and print if verbose.""" - if not self.verbose: - return - logger.info( - log_str[:max_len] + "..." 
if len(log_str) > max_len else log_str - ) + def _and_parts(self, table): + """Return table, its master and parts.""" + ret = [table] + if master := get_master(table): + ret.append(master) + if parts := self._get_ft(table).parts(): + ret.extend(parts) + return ret + + # ---------------------------- Graph Traversal ----------------------------- - def _child_to_parent( + def _bridge_restr( self, - child, - parent, - restriction, - attr_map=None, - primary=True, + table1: str, + table2: str, + restr: str, + direction: Direction = None, + attr_map: dict = None, + aliased: bool = None, **kwargs, - ) -> List[Dict[str, str]]: - """Given a child, child's restr, and parent, get parent's restr. + ): + """Given two tables and a restriction, return restriction for table2. + + Similar to ((table1 & restr) * table2).fetch(*table2.primary_key) + but with the ability to resolve aliases across tables. One table should + be the parent of the other. If direction or attr_map are not provided, + they will be inferred from the graph. Parameters ---------- - child : str - child table name - parent : str - parent table name - restriction : str - restriction to apply to child + table1 : str + Table name. Restriction always applied to this table. + table2 : str + Table name. Restriction pulled from this table. + restr : str + Restriction to apply to table1. + direction : Direction, optional + Direction to cascade. Default None. attr_map : dict, optional - dictionary mapping aliases across parend/child, as pulled from - DataJoint-assembled graph. Default None. Func will flip this dict - to convert from child to parent fields. - primary : bool, optional - Is parent in child's primary key? Default True. Also derived from - DataJoint-assembled graph. If True, project only primary key fields - to avoid secondary key collisions. + dictionary mapping aliases across tables, as pulled from + DataJoint-assembled graph. Default None. + Returns ------- List[Dict[str, str]] - List of dicts containing primary key fields for restricted parent - table. + List of dicts containing primary key fields for restricted table2. """ + if not all([direction, attr_map]): + dir_bool, edge = self._get_edge(table1, table2) + direction = "up" if dir_bool else "down" + attr_map = edge.get("attr_map") - # Need to flip attr_map to respect parent's fields - attr_reverse = ( - {v: k for k, v in attr_map.items() if k != v} if attr_map else {} - ) - child_ft = self._get_ft(child) - parent_ft = self._get_ft(parent).proj() - restr = restriction or self._get_restr(child_ft) or True - restr_child = child_ft & restr + ft1 = self._get_ft(table1) & restr + ft2 = self._get_ft(table2) + + if len(ft1) == 0: + return ["False"] - if primary: # Project only primary key fields to avoid collisions - join = restr_child.proj(**attr_reverse) * parent_ft - else: # Include all fields - join = restr_child.proj(..., **attr_reverse) * parent_ft + if bool(set(attr_map.values()) - set(ft1.heading.names)): + attr_map = {v: k for k, v in attr_map.items()} # reverse - ret = unique_dicts(join.fetch(*parent_ft.primary_key, as_dict=True)) + join = ft1.proj(**attr_map) * ft2 + ret = unique_dicts(join.fetch(*ft2.primary_key, as_dict=True)) - if len(ret) == len(parent_ft): - self._log_truncate(f"NULL restr {parent}") + if self.verbose: # For debugging. Not required for typical use. 
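+            # Classify the bridge outcome for the log: EMPTY (no rows pass),
+            # FULL (all of table2 remains), or partial.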
+ result = ( + "EMPTY" + if len(ret) == 0 + else "FULL" if len(ft2) == len(ret) else "partial" + ) + path = f"{self._camel(table1)} -> {self._camel(table2)}" + self._log_truncate(f"Bridge Link: {path}: result {result}") return ret - def cascade_files(self): - """Set node attribute for analysis files.""" - for table in self.visited: - ft = self._get_ft(table) - if not set(self.analysis_pk).issubset(ft.heading.names): - continue - files = (ft & self._get_restr(table)).fetch(*self.analysis_pk) - self._set_node(table, "files", files) + def _get_next_tables(self, table: str, direction: Direction) -> Tuple: + """Get next tables/func based on direction. + + Used in cascade1 and cascade1_search to add master and parts. Direction + is intentionally omitted to force _get_edge to determine the edge for + this gap before resuming desired direction. Nextfunc is used to get + relevant parent/child tables after aliast node. + + Parameters + ---------- + table : str + Table name + direction : Direction + Direction to cascade + + Returns + ------- + Tuple[Dict[str, Dict[str, str]], Callable + Tuple of next tables and next function to get parent/child tables. + """ + G = self.graph + dir_dict = {"direction": direction} - def cascade1(self, table, restriction): - """Cascade a restriction up the graph, recursively on parents. + bonus = {} + direction = Direction(direction) + if direction == Direction.UP: + next_func = G.parents + bonus.update({part: {} for part in self._get_ft(table).parts()}) + elif direction == Direction.DOWN: + next_func = G.children + if (master_name := get_master(table)) != "": + bonus = {master_name: {}} + else: + raise ValueError(f"Invalid direction: {direction}") + + next_tables = { + k: {**v, **dir_dict} for k, v in next_func(table).items() + } + next_tables.update(bonus) + + return next_tables, next_func + + def cascade1( + self, + table: str, + restriction: str, + direction: Direction = Direction.UP, + replace=False, + count=0, + **kwargs, + ): + """Cascade a restriction up the graph, recursively on parents/children. Parameters ---------- table : str - table name + Table name restriction : str - restriction to apply + Restriction to apply + direction : Direction, optional + Direction to cascade. Default 'up' + replace : bool, optional + Replace existing restriction. 
Default False """ - self._set_restr(table, restriction) + if count > 100: + raise RecursionError("Cascade1: Recursion limit reached.") + + self._set_restr(table, restriction, replace=replace) self.visited.add(table) - for parent, data in self.graph.parents(table).items(): - if parent in self.visited: - continue + next_tables, next_func = self._get_next_tables(table, direction) - if parent.isnumeric(): - parent, data = self.graph.parents(parent).popitem() + self._log_truncate( + f"Checking {count:>2}: {self._camel(next_tables.keys())}" + ) + for next_table, data in next_tables.items(): + if next_table.isnumeric(): # Skip alias nodes + next_table, data = next_func(next_table).popitem() - parent_restr = self._child_to_parent( - child=table, - parent=parent, - restriction=restriction, + if ( + next_table in self.visited + or next_table in self.no_visit # Subclasses can set this + or table == next_table + ): + reason = ( + "Already saw" + if next_table in self.visited + else "Banned Tbl " + ) + self._log_truncate(f"{reason}: {self._camel(next_table)}") + continue + + next_restr = self._bridge_restr( + table1=table, + table2=next_table, + restr=restriction, **data, ) - self.cascade1(parent, parent_restr) # Parent set on recursion + if next_restr == ["False"]: # Stop cascade if empty restriction + continue - def cascade(self, show_progress=None) -> None: - """Cascade all restrictions up the graph. + self.cascade1( + table=next_table, + restriction=next_restr, + direction=direction, + replace=replace, + count=count + 1, + ) + + # ---------------------------- Graph Properties ---------------------------- + + @property + def all_ft(self): + """Get restricted FreeTables from all visited nodes. + + Topological sort logic adopted from datajoint.diagram. + """ + self.cascade() + nodes = [n for n in self.visited if not n.isnumeric()] + sorted_nodes = unite_master_parts( + list(topological_sort(self.graph.subgraph(nodes))) + ) + all_ft = [ + self._get_ft(table, with_restr=True) for table in sorted_nodes + ] + return [ft for ft in all_ft if len(ft) > 0] + + @property + def as_dict(self) -> List[Dict[str, str]]: + """Return as a list of dictionaries of table_name: restriction""" + self.cascade() + return [ + {"table_name": table, "restriction": self._get_restr(table)} + for table in self.visited + if self._get_restr(table) + ] + + +class RestrGraph(AbstractGraph): + def __init__( + self, + seed_table: Table, + table_name: str = None, + restriction: str = None, + leaves: List[Dict[str, str]] = None, + direction: Direction = "up", + cascade: bool = False, + verbose: bool = False, + **kwargs, + ): + """Use graph to cascade restrictions up from leaves to all ancestors. + + 'Leaves' are nodes with restrictions applied. Restrictions are cascaded + up/down the graph to all ancestors/descendants. If cascade is desired + in both direction, leaves/cascades should be added and run separately. + Future development could allow for direction setting on a per-leaf + basis. Parameters ---------- - show_progress : bool, optional - Show tqdm progress bar. Default to verbose setting. + seed_table : Table + Table to use to establish connection and graph + table_name : str, optional + Table name of single leaf, default None + restriction : str, optional + Restriction to apply to leaf. default None + leaves : Dict[str, str], optional + List of dictionaries with keys table_name and restriction. One + entry per leaf node. Default None. + direction : Direction, optional + Direction to cascade. 
Default 'up' + cascade : bool, optional + Whether to cascade restrictions up the graph on initialization. + Default False + verbose : bool, optional + Whether to print verbose output. Default False """ - if self.cascaded: - return - to_visit = self.leaves - self.visited - for table in tqdm( - to_visit, - desc="RestrGraph: cascading restrictions", - total=len(to_visit), - disable=not (show_progress or self.verbose), - ): - restr = self._get_restr(table) - self._log_truncate(f"Start {table}: {restr}") - self.cascade1(table, restr) - if not self.visited == self.ancestors: - raise RuntimeError( - "Cascade: FAIL - incomplete cascade. Please post issue." - ) + super().__init__(seed_table, verbose=verbose) - self.cascade_files() - self.cascaded = True + self.add_leaf( + table_name=table_name, restriction=restriction, direction=direction + ) + self.add_leaves(leaves) + + if cascade: + self.cascade(direction=direction) + + # --------------------------- Dunder Properties --------------------------- + + def __repr__(self): + l_str = ",\n\t".join(self.leaves) + "\n" if self.leaves else "" + processed = "Cascaded" if self.cascaded else "Uncascaded" + return f"{processed} {self.__class__.__name__}(\n\t{l_str})" + + def __getitem__(self, index: Union[int, str]): + all_ft_names = [t.full_table_name for t in self.all_ft] + return fuzzy_get(index, all_ft_names, self.all_ft) + + def __len__(self): + return len(self.all_ft) + + # ---------------------------- Public Properties -------------------------- + + @property + def leaf_ft(self): + """Get restricted FreeTables from graph leaves.""" + return [self._get_ft(table, with_restr=True) for table in self.leaves] - def add_leaf(self, table_name, restriction, cascade=False) -> None: + # ------------------------------- Add Nodes ------------------------------- + + def add_leaf( + self, table_name=None, restriction=True, cascade=False, direction="up" + ) -> None: """Add leaf to graph and cascade if requested. Parameters ---------- - table_name : str - table name of leaf - restriction : str - restriction to apply to leaf + table_name : str, optional + table name of leaf. Default None, do nothing. + restriction : str, optional + restriction to apply to leaf. Default True, no restriction. + cascade : bool, optional + Whether to cascade the restrictions up the graph. Default False. 
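+        direction : Direction, optional
+            Direction to cascade from this leaf. Default 'up'.
+
+        Examples
+        --------
+        Schema, table, and attribute names below are placeholders:
+
+        >>> graph = RestrGraph(seed_table=my_table)
+        >>> graph.add_leaf("`my_schema`.`my_table`", "my_attr = 'val'")
+        >>> graph.cascade()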
""" - new_ancestors = set(self._get_ft(table_name).ancestors()) - self.ancestors |= new_ancestors # Add to total ancestors - self.visited -= new_ancestors # Remove from visited to revisit + if not table_name: + return + + self.cascaded = False + + new_visits = ( + set(self._get_ft(table_name).ancestors()) + if direction == "up" + else set(self._get_ft(table_name).descendants()) + ) + + self.to_visit |= new_visits # Add to total ancestors + self.visited -= new_visits # Remove from visited to revisit self.leaves.add(table_name) self._set_restr(table_name, restriction) # Redundant if cascaded @@ -297,71 +557,119 @@ def add_leaf(self, table_name, restriction, cascade=False) -> None: self.cascade_files() self.cascaded = True + def _process_leaves(self, leaves=None, default_restriction=True): + """Process leaves to ensure they are unique and have required keys.""" + if not leaves: + return [] + if not isinstance(leaves, list): + leaves = [leaves] + if all(isinstance(leaf, str) for leaf in leaves): + leaves = [ + {"table_name": leaf, "restriction": default_restriction} + for leaf in leaves + ] + if all(isinstance(leaf, dict) for leaf in leaves) and not all( + leaf.get("table_name") for leaf in leaves + ): + raise ValueError(f"All leaves must have table_name: {leaves}") + + return unique_dicts(leaves) + def add_leaves( - self, leaves: List[Dict[str, str]], cascade=False, show_progress=None + self, + leaves: Union[str, List, List[Dict[str, str]]] = None, + default_restriction: str = None, + cascade=False, ) -> None: """Add leaves to graph and cascade if requested. Parameters ---------- - leaves : List[Dict[str, str]] - list of dictionaries containing table_name and restriction + leaves : Union[str, List, List[Dict[str, str]]], optional + Table names of leaves, either as a list of strings or a list of + dictionaries with keys table_name and restriction. One entry per + leaf node. Default None, do nothing. + default_restriction : str, optional + Default restriction to apply to each leaf. Default True, no + restriction. Only used if leaf missing restriction. cascade : bool, optional Whether to cascade the restrictions up the graph. Default False + """ + leaves = self._process_leaves( + leaves=leaves, default_restriction=default_restriction + ) + for leaf in leaves: + self.add_leaf( + leaf.get("table_name"), + leaf.get("restriction"), + cascade=False, + ) + if cascade: + self.cascade() + + # ------------------------------ Graph Traversal -------------------------- + + def cascade(self, show_progress=None, direction="up") -> None: + """Cascade all restrictions up the graph. + + Parameters + ---------- show_progress : bool, optional Show tqdm progress bar. Default to verbose setting. 
""" - - if not leaves: + if self.cascaded: return - if not isinstance(leaves, list): - leaves = [leaves] - leaves = unique_dicts(leaves) - for leaf in tqdm( - leaves, - desc="RestrGraph: adding leaves", - total=len(leaves), + + to_visit = self.leaves - self.visited + + for table in tqdm( + to_visit, + desc="RestrGraph: cascading restrictions", + total=len(to_visit), disable=not (show_progress or self.verbose), ): - if not ( - (table_name := leaf.get("table_name")) - and (restriction := leaf.get("restriction")) - ): - raise ValueError( - f"Leaf must have table_name and restriction: {leaf}" - ) - self.add_leaf(table_name, restriction, cascade=False) - if cascade: - self.cascade() - self.cascade_files() + restr = self._get_restr(table) + self._log_truncate(f"Start {table}: {restr}") + self.cascade1(table, restr, direction=direction) + + self.cascade_files() + self.cascaded = True + + # ----------------------------- File Handling ----------------------------- + + def _get_files(self, table): + """Get analysis files from graph node.""" + return self._get_node(table).get("files", []) + + def cascade_files(self): + """Set node attribute for analysis files.""" + for table in self.visited: + ft = self._get_ft(table, with_restr=True) + if not set(self.analysis_pk).issubset(ft.heading.names): + continue + files = list(ft.fetch(*self.analysis_pk)) + self._set_node(table, "files", files) @property - def as_dict(self) -> List[Dict[str, str]]: - """Return as a list of dictionaries of table_name: restriction""" - self.cascade() - return [ - {"table_name": table, "restriction": self._get_restr(table)} - for table in self.ancestors - if self._get_restr(table) - ] + def analysis_file_tbl(self) -> Table: + """Return the analysis file table. Avoids circular import.""" + from spyglass.common import AnalysisNwbfile + + return AnalysisNwbfile() + + @property + def analysis_pk(self) -> List[str]: + """Return primary key fields from analysis file table.""" + return self.analysis_file_tbl.primary_key @property def file_dict(self) -> Dict[str, List[str]]: """Return dictionary of analysis files from all visited nodes. - Currently unused, but could be useful for debugging. + Included for debugging, to associate files with tables. """ - if not self.cascaded: - logger.warning("Uncascaded graph. Using leaves only.") - table_list = self.leaves - else: - table_list = self.visited - - return { - table: self._get_files(table) - for table in table_list - if any(self._get_files(table)) - } + self.cascade() + return {t: self._get_node(t).get("files", []) for t in self.visited} @property def file_paths(self) -> List[str]: @@ -371,11 +679,445 @@ def file_paths(self) -> List[str]: directly by the user. """ self.cascade() - unique_files = set( - [file for table in self.visited for file in self._get_files(table)] - ) return [ - {"file_path": AnalysisNwbfile().get_abs_path(file)} - for file in unique_files + {"file_path": self.analysis_file_tbl.get_abs_path(file)} + for file in set( + [f for files in self.file_dict.values() for f in files] + ) if file is not None ] + + +class TableChains: + """Class for representing chains from parent to Merge table via parts. + + Functions as a plural version of TableChain, allowing a single `cascade` + call across all chains from parent -> Merge table. + + Attributes + ---------- + parent : Table + Parent or origin of chains. + child : Table + Merge table or destination of chains. + connection : datajoint.Connection, optional + Connection to database used to create FreeTable objects. 
Defaults to + parent.connection. + part_names : List[str] + List of full table names of child parts. + chains : List[TableChain] + List of TableChain objects for each part in child. + has_link : bool + Cached attribute to store whether parent is linked to child via any of + child parts. False if (a) child is not in parent.descendants or (b) + nx.NetworkXNoPath is raised by nx.shortest_path for all chains. + + Methods + ------- + __init__(parent, child, connection=None) + Initialize TableChains with parent and child tables. + __repr__() + Return full representation of chains. + Multiline parent -> child for each chain. + __len__() + Return number of chains with links. + __getitem__(index: Union[int, str]) + Return TableChain object at index, or use substring of table name. + cascade(restriction: str = None) + Return list of cascade for each chain in self.chains. + """ + + def __init__(self, parent, child, direction=Direction.DOWN, verbose=False): + self.parent = parent + self.child = child + self.connection = parent.connection + self.part_names = child.parts() + self.chains = [ + TableChain(parent, part, direction=direction, verbose=verbose) + for part in self.part_names + ] + self.has_link = any([chain.has_link for chain in self.chains]) + + # --------------------------- Dunder Properties --------------------------- + + def __repr__(self): + l_str = ",\n\t".join([str(c) for c in self.chains]) + "\n" + return f"{self.__class__.__name__}(\n\t{l_str})" + + def __len__(self): + return len([c for c in self.chains if c.has_link]) + + def __getitem__(self, index: Union[int, str]): + """Return FreeTable object at index.""" + return fuzzy_get(index, self.part_names, self.chains) + + # ---------------------------- Public Properties -------------------------- + + @property + def max_len(self): + """Return length of longest chain.""" + return max([len(chain) for chain in self.chains]) + + # ------------------------------ Graph Traversal -------------------------- + + def cascade( + self, restriction: str = None, direction: Direction = Direction.DOWN + ): + """Return list of cascades for each chain in self.chains.""" + restriction = restriction or self.parent.restriction or True + cascades = [] + for chain in self.chains: + if joined := chain.cascade(restriction, direction): + cascades.append(joined) + return cascades + + +class TableChain(RestrGraph): + """Class for representing a chain of tables. + + A chain is a sequence of tables from parent to child identified by + networkx.shortest_path. Parent -> Merge should use TableChains instead to + handle multiple paths to the respective parts of the Merge table. + + Attributes + ---------- + parent : str + Parent or origin of chain. + child : str + Child or destination of chain. + has_link : bool + Cached attribute to store whether parent is linked to child. + path : List[str] + Names of tables along the path from parent to child. + all_ft : List[dj.FreeTable] + List of FreeTable objects for each table in chain with restriction + applied. + + Methods + ------- + find_path(directed=True) + Returns path OrderedDict of full table names in chain. If directed is + True, uses directed graph. If False, uses undirected graph. Undirected + excludes PERIPHERAL_TABLES like interval_list, nwbfile, etc. to maintain + valid joins. + cascade(restriction: str = None, direction: str = "up") + Given a restriction at the beginning, return a restricted FreeTable + object at the end of the chain. If direction is 'up', start at the child + and move up to the parent. 
If direction is 'down', start at the parent. + """ + + def __init__( + self, + parent: Table = None, + child: Table = None, + direction: Direction = Direction.NONE, + search_restr: str = None, + cascade: bool = False, + verbose: bool = False, + allow_merge: bool = False, + banned_tables: List[str] = None, + **kwargs, + ): + if not allow_merge and child is not None and is_merge_table(child): + raise TypeError("Child is a merge table. Use TableChains instead.") + + self.parent = self._ensure_name(parent) + self.child = self._ensure_name(child) + + if not self.parent and not self.child: + raise ValueError("Parent or child table required.") + if not search_restr and not (self.parent and self.child): + raise ValueError("Search restriction required to find path.") + + seed_table = parent if isinstance(parent, Table) else child + super().__init__(seed_table=seed_table, verbose=verbose) + + self.no_visit.update(PERIPHERAL_TABLES) + self.no_visit.update(self._ensure_name(banned_tables) or []) + self.no_visit.difference_update([self.parent, self.child]) + self.searched_tables = set() + self.found_restr = False + self.link_type = None + self.searched_path = False + self._link_symbol = " -> " + + self.search_restr = search_restr + self.direction = Direction(direction) + + self.leaf = None + if search_restr and not parent: + self.direction = Direction.UP + self.leaf = self.child + if search_restr and not child: + self.direction = Direction.DOWN + self.leaf = self.parent + if self.leaf: + self._set_find_restr(self.leaf, search_restr) + self.add_leaf(self.leaf, True, cascade=False, direction=direction) + + if cascade and search_restr: + self.cascade_search() + self.cascade(restriction=search_restr) + self.cascaded = True + + # --------------------------- Dunder Properties --------------------------- + + def __str__(self): + """Return string representation of chain: parent -> child.""" + if not self.has_link: + return "No link" + return ( + self._camel(self.parent) + + self._link_symbol + + self._camel(self.child) + ) + + def __repr__(self): + """Return full representation of chain: parent -> {links} -> child.""" + if not self.has_link: + return "No link" + return "Chain: " + self.path_str + + def __len__(self): + """Return number of tables in chain.""" + if not self.has_link: + return 0 + return len(self.path) + + def __getitem__(self, index: Union[int, str]): + return fuzzy_get(index, self.path, self.all_ft) + + # ---------------------------- Public Properties -------------------------- + + @property + def has_link(self) -> bool: + """Return True if parent is linked to child. + + If not searched, search for path. If searched and no link is found, + return False. If searched and link is found, return True. + """ + if not self.searched_path: + _ = self.path + return self.link_type is not None + + @cached_property + def all_ft(self) -> List[dj.FreeTable]: + """Return list of FreeTable objects for each table in chain. + + Unused. Preserved for future debugging. 
+ """ + if not self.has_link: + return None + return [ + self._get_ft(table, with_restr=False) + for table in self.path + if not table.isnumeric() + ] + + @property + def path_str(self) -> str: + if not self.path: + return "No link" + return self._link_symbol.join([self._camel(t) for t in self.path]) + + # ------------------------------ Graph Nodes ------------------------------ + + def _set_find_restr(self, table_name, restriction): + """Set restr to look for from leaf node.""" + if isinstance(restriction, dict): + restriction = [restriction] + + if isinstance(restriction, list) and all( + [isinstance(r, dict) for r in restriction] + ): + restr_attrs = set(key for restr in restriction for key in restr) + find_restr = restriction + elif isinstance(restriction, str): + restr_attrs = set() # modified by make_condition + table_ft = self._get_ft(table_name) + find_restr = make_condition(table_ft, restriction, restr_attrs) + else: + raise ValueError( + f"Invalid restriction type, use STR: {restriction}" + ) + + self._set_node(table_name, "restr_attrs", restr_attrs) + self._set_node(table_name, "find_restr", find_restr) + + def _get_find_restr(self, table) -> Tuple[str, Set[str]]: + """Get restr and restr_attrs from leaf node.""" + node = self._get_node(table) + return node.get("find_restr", False), node.get("restr_attrs", set()) + + # ---------------------------- Graph Traversal ---------------------------- + + def cascade_search(self) -> None: + if self.cascaded: + return + restriction, restr_attrs = self._get_find_restr(self.leaf) + self.cascade1_search( + table=self.leaf, + restriction=restriction, + restr_attrs=restr_attrs, + replace=True, + ) + if not self.found_restr: + searched = ( + "parents" if self.direction == Direction.UP else "children" + ) + logger.warning( + f"Restriction could not be applied to any {searched}.\n\t" + + f"From: {self.leaves}\n\t" + + f"Restr: {restriction}" + ) + + def _set_found_vars(self, table): + """Set found_restr and searched_tables.""" + self._set_restr(table, self.search_restr, replace=True) + self.found_restr = True + self.searched_tables.update(set(self._and_parts(table))) + + if self.direction == Direction.UP: + self.parent = table + elif self.direction == Direction.DOWN: + self.child = table + + self._log_truncate(f"FVars: {self._camel(table)}") + + self.direction = ~self.direction + _ = self.path # Reset path + + def cascade1_search( + self, + table: str = None, + restriction: str = True, + restr_attrs: Set[str] = None, + replace: bool = True, + limit: int = 100, + **kwargs, + ): + if ( + self.found_restr + or not table + or limit < 1 + or table in self.searched_tables + ): + return + + self.searched_tables.add(table) + next_tables, next_func = self._get_next_tables(table, self.direction) + + for next_table, data in next_tables.items(): + if next_table.isnumeric(): + next_table, data = next_func(next_table).popitem() + self._log_truncate( + f"Search Link: {self._camel(table)} -> {self._camel(next_table)}" + ) + + if next_table in self.no_visit or table == next_table: + reason = "Already Saw" if next_table == table else "Banned Tbl " + self._log_truncate(f"{reason}: {self._camel(next_table)}") + continue + + next_ft = self._get_ft(next_table) + if restr_attrs.issubset(set(next_ft.heading.names)): + self._log_truncate(f"Found: {self._camel(next_table)}") + self._set_found_vars(next_table) + return + + self.cascade1_search( + table=next_table, + restriction=restriction, + restr_attrs=restr_attrs, + replace=replace, + limit=limit - 1, + **data, + ) + if 
self.found_restr: + return + + # ------------------------------ Path Finding ------------------------------ + + def find_path(self, directed=True) -> List[str]: + """Return list of full table names in chain. + + Parameters + ---------- + directed : bool, optional + If True, use directed graph. If False, use undirected graph. + Defaults to True. Undirected permits paths to traverse from merge + part-parent -> merge part -> merge table. Undirected excludes + PERIPHERAL_TABLES like interval_list, nwbfile, etc. + + Returns + ------- + List[str] + List of names in the path. + """ + source, target = self.parent, self.child + search_graph = self.graph + + if not directed: + self.connection.dependencies.load() + self.undirect_graph = self.connection.dependencies.to_undirected() + search_graph = self.undirect_graph + + search_graph.remove_nodes_from(self.no_visit) + + try: + path = shortest_path(search_graph, source, target) + except NetworkXNoPath: + return None # No path found, parent func may do undirected search + except NodeNotFound: + self.searched_path = True # No path found, don't search again + return None + + self._log_truncate(f"Path Found : {path}") + + ignore_nodes = self.graph.nodes - set(path) + self.no_visit.update(ignore_nodes) + + self._log_truncate(f"Ignore : {ignore_nodes}") + return path + + @cached_property + def path(self) -> list: + """Return list of full table names in chain.""" + if self.searched_path and not self.has_link: + return None + + path = None + if path := self.find_path(directed=True): + self.link_type = "directed" + elif path := self.find_path(directed=False): + self.link_type = "undirected" + self.searched_path = True + + return path + + def cascade(self, restriction: str = None, direction: Direction = None): + if not self.has_link: + return + + _ = self.path + + direction = Direction(direction) or self.direction + if direction == Direction.UP: + start, end = self.child, self.parent + elif direction == Direction.DOWN: + start, end = self.parent, self.child + else: + raise ValueError(f"Invalid direction: {direction}") + + self.cascade1( + table=start, + restriction=restriction or self._get_restr(start), + direction=direction, + replace=True, + ) + + return self._get_ft(end, with_restr=True) + + def restrict_by(self, *args, **kwargs) -> None: + """Cascade passthrough.""" + return self.cascade(*args, **kwargs) diff --git a/src/spyglass/utils/dj_helper_fn.py b/src/spyglass/utils/dj_helper_fn.py index 7af1fb2b4..89b1950cd 100644 --- a/src/spyglass/utils/dj_helper_fn.py +++ b/src/spyglass/utils/dj_helper_fn.py @@ -2,16 +2,40 @@ import inspect import os -from typing import Type +from typing import List, Type, Union import datajoint as dj import numpy as np from datajoint.user_tables import UserTable -from spyglass.utils.dj_chains import PERIPHERAL_TABLES from spyglass.utils.logging import logger from spyglass.utils.nwb_helper_fn import get_nwb_file +# Tables that should be excluded from the undirected graph when finding paths +# for TableChain objects and searching for an upstream key. 
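+# Excluding these "hub" tables keeps undirected path searches from taking
+# shortcuts through tables that nearly every pipeline references, which would
+# otherwise yield invalid joins (see TableChain.find_path).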
+PERIPHERAL_TABLES = [ + "`common_interval`.`interval_list`", + "`common_nwbfile`.`__analysis_nwbfile_kachery`", + "`common_nwbfile`.`__nwbfile_kachery`", + "`common_nwbfile`.`analysis_nwbfile_kachery_selection`", + "`common_nwbfile`.`analysis_nwbfile_kachery`", + "`common_nwbfile`.`analysis_nwbfile`", + "`common_nwbfile`.`kachery_channel`", + "`common_nwbfile`.`nwbfile_kachery_selection`", + "`common_nwbfile`.`nwbfile_kachery`", + "`common_nwbfile`.`nwbfile`", +] + + +def fuzzy_get(index: Union[int, str], names: List[str], sources: List[str]): + """Given lists of items/names, return item at index or by substring.""" + if isinstance(index, int): + return sources[index] + for i, part in enumerate(names): + if index in part: + return sources[i] + return None + def unique_dicts(list_of_dict): """Remove duplicate dictionaries from a list.""" diff --git a/src/spyglass/utils/dj_merge_tables.py b/src/spyglass/utils/dj_merge_tables.py index 2b8aab5ef..0b8f16de6 100644 --- a/src/spyglass/utils/dj_merge_tables.py +++ b/src/spyglass/utils/dj_merge_tables.py @@ -1,8 +1,8 @@ -import re from contextlib import nullcontext from inspect import getmodule from itertools import chain as iter_chain from pprint import pprint +from re import sub as re_sub from time import time from typing import Union @@ -10,7 +10,7 @@ from datajoint.condition import make_condition from datajoint.errors import DataJointError from datajoint.preview import repr_html -from datajoint.utils import from_camel_case, to_camel_case +from datajoint.utils import from_camel_case, get_master, to_camel_case from IPython.core.display import HTML from spyglass.utils.logging import logger @@ -25,23 +25,29 @@ def is_merge_table(table): - """Return True if table definition matches the default Merge table. + """Return True if table fields exactly match Merge table.""" - Regex removes comments and blank lines before comparison. 
- """ + def trim_def(definition): + return re_sub( + r"\n\s*\n", "\n", re_sub(r"#.*\n", "\n", definition.strip()) + ) + + if isinstance(table, str): + table = dj.FreeTable(dj.conn(), table) if not isinstance(table, dj.Table): return False - if isinstance(table, dj.FreeTable): - fields, pk = table.heading.names, table.primary_key - return fields == [ - RESERVED_PRIMARY_KEY, - RESERVED_SECONDARY_KEY, - ] and pk == [RESERVED_PRIMARY_KEY] - return MERGE_DEFINITION == re.sub( - r"\n\s*\n", - "\n", - re.sub(r"#.*\n", "\n", getattr(table, "definition", "")), - ) + if get_master(table.full_table_name): + return False # Part tables are not merge tables + if not table.is_declared: + if tbl_def := getattr(table, "definition", None): + return trim_def(MERGE_DEFINITION) == trim_def(tbl_def) + logger.warning( + f"Cannot determine merge table status for {table.table_name}" + ) + return True + return table.primary_key == [ + RESERVED_PRIMARY_KEY + ] and table.heading.secondary_attributes == [RESERVED_SECONDARY_KEY] class Merge(dj.Manual): @@ -62,8 +68,8 @@ def __init__(self): if not is_merge_table(self): # Check definition logger.warn( "Merge table with non-default definition\n" - + f"Expected: {MERGE_DEFINITION.strip()}\n" - + f"Actual : {self.definition.strip()}" + + f"Expected:\n{MERGE_DEFINITION.strip()}\n" + + f"Actual :\n{self.definition.strip()}" ) for part in self.parts(as_objects=True): if part.primary_key != self.primary_key: @@ -74,12 +80,6 @@ def __init__(self): ) self._source_class_dict = {} - def _remove_comments(self, definition): - """Use regular expressions to remove comments and blank lines""" - return re.sub( # First remove comments, then blank lines - r"\n\s*\n", "\n", re.sub(r"#.*\n", "\n", definition) - ) - @staticmethod def _part_name(part=None): """Return the CamelCase name of a part table""" @@ -141,9 +141,6 @@ def _merge_restrict_parts( cls._ensure_dependencies_loaded() - if not restriction: - restriction = True - # Normalize restriction to sql string restr_str = make_condition(cls(), restriction, set()) @@ -387,8 +384,7 @@ def _ensure_dependencies_loaded(cls) -> None: Otherwise parts returns none """ - if not dj.conn.connection.dependencies._loaded: - dj.conn.connection.dependencies.load() + dj.conn.connection.dependencies.load() def insert(self, rows: list, **kwargs): """Merges table specific insert, ensuring data exists in part parents. @@ -783,7 +779,7 @@ def merge_fetch(self, restriction: str = True, *attrs, **kwargs) -> list: "No merge_fetch results.\n\t" + "If not restricting, try: `M.merge_fetch(True,'attr')\n\t" + "If restricting by source, use dict: " - + "`M.merge_fetch({'source':'X'})" + + "`M.merge_fetch({'source':'X'}" ) return results[0] if len(results) == 1 else results @@ -818,7 +814,7 @@ def super_delete(self, warn=True, *args, **kwargs): """ if warn: logger.warning("!! Bypassing cautious_delete !!") - self._log_use(start=time(), super_delete=True) + self._log_delete(start=time(), super_delete=True) super().delete(*args, **kwargs) @@ -830,10 +826,6 @@ def super_delete(self, warn=True, *args, **kwargs): def delete_downstream_merge( table: dj.Table, - restriction: str = None, - dry_run=True, - recurse_level=2, - disable_warning=False, **kwargs, ) -> list: """Given a table/restriction, id or delete relevant downstream merge entries @@ -858,12 +850,15 @@ def delete_downstream_merge( List[Tuple[dj.Table, dj.Table]] Entries in merge/part tables downstream of table input. """ + logger.warning( + "DEPRECATED: This function will be removed in `0.6`. 
" + + "Use AnyTable().delete_downstream_merge() instead." + ) + from spyglass.utils.dj_mixin import SpyglassMixin if not isinstance(table, SpyglassMixin): raise ValueError("Input must be a Spyglass Table.") table = table if isinstance(table, dj.Table) else table() - return table.delete_downstream_merge( - restriction=restriction, dry_run=dry_run, **kwargs - ) + return table.delete_downstream_merge(**kwargs) diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index 05f510193..08fa377b3 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -13,12 +13,11 @@ from datajoint.expression import QueryExpression from datajoint.logging import logger as dj_logger from datajoint.table import Table -from datajoint.utils import get_master, user_choice +from datajoint.utils import get_master, to_camel_case, user_choice from networkx import NetworkXError from pymysql.err import DataError from spyglass.utils.database_settings import SHARED_MODULES -from spyglass.utils.dj_chains import TableChain, TableChains from spyglass.utils.dj_helper_fn import fetch_nwb, get_nwb_table from spyglass.utils.dj_merge_tables import RESERVED_PRIMARY_KEY as MERGE_PK from spyglass.utils.dj_merge_tables import Merge, is_merge_table @@ -71,6 +70,8 @@ class SpyglassMixin: _session_pk = None # Session primary key. Mixin is ambivalent to Session pk _member_pk = None # LabMember primary key. Mixin ambivalent table structure + _banned_search_tables = set() # Tables to avoid in restrict_by + def __init__(self, *args, **kwargs): """Initialize SpyglassMixin. @@ -93,6 +94,33 @@ def __init__(self, *args, **kwargs): + self.full_table_name ) + # -------------------------- Misc helper methods -------------------------- + + @property + def camel_name(self): + """Return table name in camel case.""" + return to_camel_case(self.table_name) + + def _auto_increment(self, key, pk, *args, **kwargs): + """Auto-increment primary key.""" + if not key.get(pk): + key[pk] = (dj.U().aggr(self, n=f"max({pk})").fetch1("n") or 0) + 1 + return key + + def file_like(self, name=None, **kwargs): + """Convenience method for wildcard search on file name fields.""" + if not name: + return self & True + attr = None + for field in self.heading.names: + if "file" in field: + attr = field + break + if not attr: + logger.error(f"No file-like field found in {self.full_table_name}") + return + return self & f"{attr} LIKE '%{name}%'" + # ------------------------------- fetch_nwb ------------------------------- @cached_property @@ -203,6 +231,26 @@ def fetch_pynapple(self, *attrs, **kwargs): # ------------------------ delete_downstream_merge ------------------------ + def _import_merge_tables(self): + """Import all merge tables downstream of self.""" + from spyglass.decoding.decoding_merge import DecodingOutput # noqa F401 + from spyglass.lfp.lfp_merge import LFPOutput # noqa F401 + from spyglass.linearization.merge import ( + LinearizedPositionOutput, + ) # noqa F401 + from spyglass.position.position_merge import PositionOutput # noqa F401 + from spyglass.spikesorting.spikesorting_merge import ( # noqa F401 + SpikeSortingOutput, + ) + + _ = ( + DecodingOutput(), + LFPOutput(), + LinearizedPositionOutput(), + PositionOutput(), + SpikeSortingOutput(), + ) + @cached_property def _merge_tables(self) -> Dict[str, dj.FreeTable]: """Dict of merge tables downstream of self: {full_table_name: FreeTable}. 
@@ -215,10 +263,6 @@ def _merge_tables(self) -> Dict[str, dj.FreeTable]: visited = set() def search_descendants(parent): - # TODO: Add check that parents are in the graph. If not, raise error - # asking user to import the table. - # TODO: Make a `is_merge_table` helper, and check for false - # positives in the mixin init. for desc in parent.descendants(as_objects=True): if ( MERGE_PK not in desc.heading.names @@ -235,12 +279,16 @@ def search_descendants(parent): try: _ = search_descendants(self) - except NetworkXError as e: - table_name = "".join(e.args[0].split("`")[1:4]) - raise ValueError(f"Please import {table_name} and try again.") + except NetworkXError: + try: # Attempt to import missing table + self._import_merge_tables() + _ = search_descendants(self) + except NetworkXError as e: + table_name = "".join(e.args[0].split("`")[1:4]) + raise ValueError(f"Please import {table_name} and try again.") logger.info( - f"Building merge cache for {self.table_name}.\n\t" + f"Building merge cache for {self.camel_name}.\n\t" + f"Found {len(merge_tables)} downstream merge tables" ) @@ -258,9 +306,11 @@ def _merge_chains(self) -> OrderedDict[str, List[dj.FreeTable]]: with a new restriction. To recompute, add `reload_cache=True` to delete_downstream_merge call. """ + from spyglass.utils.dj_graph import TableChains # noqa F401 + merge_chains = {} for name, merge_table in self._merge_tables.items(): - chains = TableChains(self, merge_table, connection=self.connection) + chains = TableChains(self, merge_table) if len(chains): merge_chains[name] = chains @@ -268,13 +318,14 @@ def _merge_chains(self) -> OrderedDict[str, List[dj.FreeTable]]: # that the merge table with the longest chain is the most downstream. # A more sophisticated approach would order by length from self to # each merge part independently, but this is a good first approximation. + return OrderedDict( sorted( merge_chains.items(), key=lambda x: x[1].max_len, reverse=True ) ) - def _get_chain(self, substring) -> TableChains: + def _get_chain(self, substring): """Return chain from self to merge table with substring in name.""" for name, chain in self._merge_chains.items(): if substring.lower() in name: @@ -330,20 +381,19 @@ def delete_downstream_merge( Passed to datajoint.table.Table.delete. """ if reload_cache: - del self._merge_tables - del self._merge_chains + for attr in ["_merge_tables", "_merge_chains"]: + _ = self.__dict__.pop(attr, None) restriction = restriction or self.restriction or True merge_join_dict = {} for name, chain in self._merge_chains.items(): - join = chain.join(restriction) - if join: + if join := chain.cascade(restriction, direction="down"): merge_join_dict[name] = join if not merge_join_dict and not disable_warning: logger.warning( - f"No merge deletes found w/ {self.table_name} & " + f"No merge deletes found w/ {self.camel_name} & " + f"{restriction}.\n\tIf this is unexpected, try importing " + " Merge table(s) and running with `reload_cache`." ) @@ -424,8 +474,10 @@ def _get_exp_summary(self): return exp_missing + exp_present @cached_property - def _session_connection(self) -> Union[TableChain, bool]: + def _session_connection(self): """Path from Session table to self. 
        False if no connection found."""
+        from spyglass.utils.dj_graph import TableChain  # noqa F401
+
         connection = TableChain(parent=self._delete_deps[-1], child=self)
         return connection if connection.has_link else False
 
@@ -716,27 +768,132 @@ def fetch1(self, *args, log_fetch=True, **kwargs):
             self._log_fetch(*args, **kwargs)
         return ret
 
-    # ------------------------- Other helper methods -------------------------
+    # ------------------------------ Restrict by ------------------------------
 
-    def _auto_increment(self, key, pk, *args, **kwargs):
-        """Auto-increment primary key."""
-        if not key.get(pk):
-            key[pk] = (dj.U().aggr(self, n=f"max({pk})").fetch1("n") or 0) + 1
-        return key
+    def __lshift__(self, restriction) -> QueryExpression:
+        """Restriction by upstream operator e.g. ``q1 << q2``.
 
-    def file_like(self, name=None, **kwargs):
-        """Convenience method for wildcard search on file name fields."""
-        if not name:
-            return self & True
-        attr = None
-        for field in self.heading.names:
-            if "file" in field:
-                attr = field
-                break
-        if not attr:
-            logger.error(f"No file-like field found in {self.full_table_name}")
-            return
-        return self & f"{attr} LIKE '%{name}%'"
+        Returns
+        -------
+        QueryExpression
+            A restricted copy of the query expression using the nearest upstream
+            table for which the restriction is valid.
+        """
+        return self.restrict_by(restriction, direction="up")
+
+    def __rshift__(self, restriction) -> QueryExpression:
+        """Restriction by downstream operator e.g. ``q1 >> q2``.
+
+        Returns
+        -------
+        QueryExpression
+            A restricted copy of the query expression using the nearest
+            downstream table for which the restriction is valid.
+        """
+        return self.restrict_by(restriction, direction="down")
+
+    def _ensure_names(self, tables) -> List[str]:
+        """Return full table names as strings in a list."""
+        if not isinstance(tables, (list, tuple, set)):
+            tables = [tables]
+        return [getattr(t, "full_table_name", t) for t in tables]
+
+    def ban_search_table(self, table):
+        """Ban table from search in restrict_by."""
+        self._banned_search_tables.update(self._ensure_names(table))
+
+    def unban_search_table(self, table):
+        """Unban table from search in restrict_by."""
+        self._banned_search_tables.difference_update(self._ensure_names(table))
+
+    def see_banned_tables(self):
+        """Print banned tables."""
+        logger.info(f"Banned tables: {self._banned_search_tables}")
+
+    def restrict_by(
+        self,
+        restriction: str = True,
+        direction: str = "up",
+        return_graph: bool = False,
+        verbose: bool = False,
+        **kwargs,
+    ) -> QueryExpression:
+        """Restrict self based on an up/downstream table.
+
+        If this fails to restrict the table, the shortest path found may not
+        have been the right one. If a different path should be taken, ban the
+        unwanted tables.
+
+        >>> my_table = MyTable()  # must be instantiated
+        >>> my_table.ban_search_table(UnwantedTable1)
+        >>> my_table.ban_search_table([UnwantedTable2, UnwantedTable3])
+        >>> my_table.unban_search_table(UnwantedTable3)
+        >>> my_table.see_banned_tables()
+        >>>
+        >>> my_table << my_restriction
+
+        Parameters
+        ----------
+        restriction : str
+            Restriction to apply to some table up/downstream of self.
+        direction : str, optional
+            Direction to search for valid restriction. Default 'up'.
+        return_graph : bool, optional
+            If True, return FindKeyGraph object. Default False, returns
+            restricted version of present table.
+        verbose : bool, optional
+            If True, print verbose output. Default False.
+ + Returns + ------- + Union[QueryExpression, FindKeyGraph] + Restricted version of present table or FindKeyGraph object. If + return_graph, use all_ft attribute to see all tables in cascade. + """ + from spyglass.utils.dj_graph import TableChain # noqa: F401 + + if restriction is True: + return self + + try: + ret = self.restrict(restriction) # Save time trying first + if len(ret) < len(self): + logger.warning("Restriction valid for this table. Using as is.") + return ret + except DataJointError: + pass # Could avoid try/except if assert_join_compatible return bool + logger.debug("Restriction not valid. Attempting to cascade.") + + if direction == "up": + parent, child = None, self + elif direction == "down": + parent, child = self, None + else: + raise ValueError("Direction must be 'up' or 'down'.") + + graph = TableChain( + parent=parent, + child=child, + direction=direction, + search_restr=restriction, + banned_tables=list(self._banned_search_tables), + allow_merge=True, + cascade=True, + verbose=verbose, + **kwargs, + ) + + if return_graph: + return graph + + ret = graph.leaf_ft[0] + if len(ret) == len(self) or len(ret) == 0: + logger.warning( + f"Failed to restrict with path: {graph.path_str}\n\t" + + "See `help(YourTable.restrict_by)`" + ) + + return ret class SpyglassMixinPart(SpyglassMixin, dj.Part): diff --git a/src/spyglass/utils/nwb_helper_fn.py b/src/spyglass/utils/nwb_helper_fn.py index 43eb70aa9..de7671b42 100644 --- a/src/spyglass/utils/nwb_helper_fn.py +++ b/src/spyglass/utils/nwb_helper_fn.py @@ -513,7 +513,7 @@ def get_nwb_copy_filename(nwb_file_name): def change_group_permissions( subject_ids, set_group_name, analysis_dir="/stelmo/nwb/analysis" ): - logger.warning("This function is deprecated and will be removed soon.") + logger.warning("DEPRECATED: This function will be removed in `0.6`.") # Change to directory with analysis nwb files os.chdir(analysis_dir) # Get nwb file directories with specified subject ids diff --git a/tests/common/test_device.py b/tests/common/test_device.py index 49bbd9027..19103cf98 100644 --- a/tests/common/test_device.py +++ b/tests/common/test_device.py @@ -2,7 +2,7 @@ from numpy import array_equal -def test_invalid_device(common, populate_exception): +def test_invalid_device(common, populate_exception, mini_insert): device_dict = common.DataAcquisitionDevice.fetch(as_dict=True)[0] device_dict["other"] = "invalid" with pytest.raises(populate_exception): diff --git a/tests/conftest.py b/tests/conftest.py index 0bcb4a3fd..7950854d6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,10 @@ +"""Configuration for pytest, including fixtures and command line options. + +Fixtures in this script are mad available to all tests in the test suite. +conftest.py files in subdirectories have fixtures that are only available to +tests in that subdirectory. 
+""" + import os import sys import warnings @@ -7,17 +14,19 @@ from time import sleep as tsleep import datajoint as dj +import numpy as np import pynwb import pytest from datajoint.logging import logger as dj_logger from .container import DockerMySQLManager -# ---------------------- CONSTANTS --------------------- +warnings.filterwarnings("ignore", category=UserWarning, module="hdmf") + +# ------------------------------- TESTS CONFIG ------------------------------- # globals in pytest_configure: # BASE_DIR, RAW_DIR, SERVER, TEARDOWN, VERBOSE, TEST_FILE, DOWNLOAD -warnings.filterwarnings("ignore", category=UserWarning, module="hdmf") def pytest_addoption(parser): @@ -131,7 +140,7 @@ def pytest_unconfigure(config): SERVER.stop() -# ------------------- FIXTURES ------------------- +# ---------------------------- FIXTURES, TEST ENV ---------------------------- @pytest.fixture(scope="session") @@ -143,6 +152,27 @@ def verbose(): @pytest.fixture(scope="session", autouse=True) def verbose_context(verbose): """Verbosity context for suppressing Spyglass logging.""" + + class QuietStdOut: + """Used to quiet all prints and logging as context manager.""" + + def __init__(self): + from spyglass.utils import logger as spyglass_logger + + self.spy_logger = spyglass_logger + self.previous_level = None + + def __enter__(self): + self.previous_level = self.spy_logger.getEffectiveLevel() + self.spy_logger.setLevel("CRITICAL") + self._original_stdout = sys.stdout + sys.stdout = open(os.devnull, "w") + + def __exit__(self, exc_type, exc_val, exc_tb): + self.spy_logger.setLevel(self.previous_level) + sys.stdout.close() + sys.stdout = self._original_stdout + yield nullcontext() if verbose else QuietStdOut() @@ -193,6 +223,9 @@ def raw_dir(base_dir): yield base_dir / "raw" +# ------------------------------- FIXTURES, DATA ------------------------------- + + @pytest.fixture(scope="session") def mini_path(raw_dir): path = raw_dir / TEST_FILE @@ -251,12 +284,14 @@ def load_config(dj_conn, base_dir): from spyglass.settings import SpyglassConfig yield SpyglassConfig().load_config( - base_dir=base_dir, test_mode=True, force_reload=True + base_dir=base_dir, debug_mode=False, test_mode=True, force_reload=True ) @pytest.fixture(autouse=True, scope="session") -def mini_insert(mini_path, teardown, server, load_config): +def mini_insert( + dj_conn, mini_path, mini_content, teardown, server, load_config +): from spyglass.common import LabMember, Nwbfile, Session # noqa: E402 from spyglass.data_import import insert_sessions # noqa: E402 from spyglass.spikesorting.spikesorting_merge import ( # noqa: E402 @@ -264,6 +299,8 @@ def mini_insert(mini_path, teardown, server, load_config): ) from spyglass.utils.nwb_helper_fn import close_nwb_files # noqa: E402 + _ = SpikeSortingOutput() + LabMember().insert1( ["Root User", "Root", "User"], skip_duplicates=not teardown ) @@ -287,8 +324,7 @@ def mini_insert(mini_path, teardown, server, load_config): yield close_nwb_files() - # Note: no need to run deletes in teardown, since we are using teardown - # will remove the container + # Note: no need to run deletes in teardown, bc removing the container @pytest.fixture(scope="session") @@ -301,6 +337,9 @@ def mini_dict(mini_copy_name): yield {"nwb_file_name": mini_copy_name} +# --------------------------- FIXTURES, SUBMODULES --------------------------- + + @pytest.fixture(scope="session") def common(dj_conn): from spyglass import common @@ -322,6 +361,41 @@ def settings(dj_conn): yield settings +@pytest.fixture(scope="session") +def 
sgp(common): + from spyglass import position + + yield position + + +@pytest.fixture(scope="session") +def lfp(common): + from spyglass import lfp + + return lfp + + +@pytest.fixture(scope="session") +def lfp_band(lfp): + from spyglass.lfp.analysis.v1 import lfp_band + + return lfp_band + + +@pytest.fixture(scope="session") +def sgl(common): + from spyglass import linearization + + yield linearization + + +@pytest.fixture(scope="session") +def sgpl(sgl): + from spyglass.linearization import v1 + + yield v1 + + @pytest.fixture(scope="session") def populate_exception(): from spyglass.common.errors import PopulateException @@ -329,11 +403,7 @@ def populate_exception(): yield PopulateException -@pytest.fixture(scope="session") -def sgp(common): - from spyglass import position - - yield position +# ------------------------- FIXTURES, POSITION TABLES ------------------------- @pytest.fixture(scope="session") @@ -418,12 +488,16 @@ def trodes_pos_v1(teardown, sgp, trodes_sel_keys): def pos_merge_tables(dj_conn): """Return the merge tables as activated.""" from spyglass.common.common_position import TrackGraph + from spyglass.lfp.lfp_merge import LFPOutput from spyglass.linearization.merge import LinearizedPositionOutput from spyglass.position.position_merge import PositionOutput # must import common_position before LinOutput to avoid circular import - _ = TrackGraph() + + # import LFPOutput to use when testing mixin cascade + _ = LFPOutput() + return [PositionOutput(), LinearizedPositionOutput()] @@ -442,25 +516,258 @@ def pos_merge_key(pos_merge, trodes_pos_v1, trodes_sel_keys): yield pos_merge.merge_get_part(trodes_sel_keys[-1]).fetch1("KEY") -# ------------------ GENERAL FUNCTION ------------------ +# ---------------------- FIXTURES, LINEARIZATION TABLES ---------------------- +# ---------------------- Note: Used to test RestrGraph ----------------------- + + +@pytest.fixture(scope="session") +def pos_lin_key(trodes_sel_keys): + yield trodes_sel_keys[-1] + + +@pytest.fixture(scope="session") +def position_info(pos_merge, pos_merge_key): + yield (pos_merge & {"merge_id": pos_merge_key}).fetch1_dataframe() + + +@pytest.fixture(scope="session") +def track_graph_key(): + yield {"track_graph_name": "6 arm"} + + +@pytest.fixture(scope="session") +def track_graph(teardown, sgpl, track_graph_key): + node_positions = np.array( + [ + (79.910, 216.720), # top left well 0 + (132.031, 187.806), # top middle intersection 1 + (183.718, 217.713), # top right well 2 + (132.544, 132.158), # middle intersection 3 + (87.202, 101.397), # bottom left intersection 4 + (31.340, 126.110), # middle left well 5 + (180.337, 104.799), # middle right intersection 6 + (92.693, 42.345), # bottom left well 7 + (183.784, 45.375), # bottom right well 8 + (231.338, 136.281), # middle right well 9 + ] + ) + + edges = np.array( + [ + (0, 1), + (1, 2), + (1, 3), + (3, 4), + (4, 5), + (3, 6), + (6, 9), + (4, 7), + (6, 8), + ] + ) + + linear_edge_order = [ + (3, 6), + (6, 8), + (6, 9), + (3, 1), + (1, 2), + (1, 0), + (3, 4), + (4, 5), + (4, 7), + ] + linear_edge_spacing = 15 + + sgpl.TrackGraph.insert1( + { + **track_graph_key, + "environment": track_graph_key["track_graph_name"], + "node_positions": node_positions, + "edges": edges, + "linear_edge_order": linear_edge_order, + "linear_edge_spacing": linear_edge_spacing, + }, + skip_duplicates=True, + ) + + yield sgpl.TrackGraph & {"track_graph_name": "6 arm"} + if teardown: + sgpl.TrackGraph().delete(safemode=False) + + +@pytest.fixture(scope="session") +def lin_param_key(): + yield 
{"linearization_param_name": "default"} + + +@pytest.fixture(scope="session") +def lin_params( + teardown, + sgpl, + lin_param_key, +): + param_table = sgpl.LinearizationParameters() + param_table.insert1(lin_param_key, skip_duplicates=True) + yield param_table + + +@pytest.fixture(scope="session") +def lin_sel_key( + pos_merge_key, track_graph_key, lin_param_key, lin_params, track_graph +): + yield { + "pos_merge_id": pos_merge_key["merge_id"], + **track_graph_key, + **lin_param_key, + } + + +@pytest.fixture(scope="session") +def lin_sel(teardown, sgpl, lin_sel_key): + sel_table = sgpl.LinearizationSelection() + sel_table.insert1(lin_sel_key, skip_duplicates=True) + yield sel_table + if teardown: + sel_table.delete(safemode=False) -class QuietStdOut: - """If quiet_spy, used to quiet prints, teardowns and table.delete prints""" +@pytest.fixture(scope="session") +def lin_v1(teardown, sgpl, lin_sel): + v1 = sgpl.LinearizedPositionV1() + v1.populate() + yield v1 + if teardown: + v1.delete(safemode=False) + + +@pytest.fixture(scope="session") +def lin_merge_key(lin_merge, lin_v1, lin_sel_key): + yield lin_merge.merge_get_part(lin_sel_key).fetch1("KEY") - def __init__(self): - from spyglass.utils import logger as spyglass_logger - self.spy_logger = spyglass_logger - self.previous_level = None +# --------------------------- FIXTURES, LFP TABLES --------------------------- +# ---------------- Note: LFPOuput is used to test RestrGraph ----------------- + + +@pytest.fixture(scope="module") +def lfp_band_v1(lfp_band): + yield lfp_band.LFPBandV1() + + +@pytest.fixture(scope="session") +def firfilters_table(common): + return common.FirFilterParameters() + + +@pytest.fixture(scope="session") +def electrodegroup_table(lfp): + return lfp.v1.LFPElectrodeGroup() - def __enter__(self): - self.previous_level = self.spy_logger.getEffectiveLevel() - self.spy_logger.setLevel("CRITICAL") - self._original_stdout = sys.stdout - sys.stdout = open(os.devnull, "w") - def __exit__(self, exc_type, exc_val, exc_tb): - self.spy_logger.setLevel(self.previous_level) - sys.stdout.close() - sys.stdout = self._original_stdout +@pytest.fixture(scope="session") +def lfp_constants(common, mini_copy_name, mini_dict): + n_delay = 9 + lfp_electrode_group_name = "test" + orig_list_name = "01_s1" + orig_valid_times = ( + common.IntervalList + & mini_dict + & f"interval_list_name = '{orig_list_name}'" + ).fetch1("valid_times") + new_list_name = orig_list_name + f"_first{n_delay}" + new_list_key = { + "nwb_file_name": mini_copy_name, + "interval_list_name": new_list_name, + "valid_times": np.asarray( + [[orig_valid_times[0, 0], orig_valid_times[0, 0] + n_delay]] + ), + } + + yield dict( + lfp_electrode_ids=[0], + lfp_electrode_group_name=lfp_electrode_group_name, + lfp_eg_key={ + "nwb_file_name": mini_copy_name, + "lfp_electrode_group_name": lfp_electrode_group_name, + }, + n_delay=n_delay, + orig_interval_list_name=orig_list_name, + orig_valid_times=orig_valid_times, + interval_list_name=new_list_name, + interval_key=new_list_key, + filter1_name="LFP 0-400 Hz", + filter_sampling_rate=30_000, + filter2_name="Theta 5-11 Hz", + lfp_band_electrode_ids=[0], # assumes we've filtered these electrodes + lfp_band_sampling_rate=100, # desired sampling rate + ) + + +@pytest.fixture(scope="session") +def add_electrode_group( + firfilters_table, + electrodegroup_table, + mini_copy_name, + lfp_constants, +): + firfilters_table.create_standard_filters() + group_name = lfp_constants.get("lfp_electrode_group_name") + 
electrodegroup_table.create_lfp_electrode_group( + nwb_file_name=mini_copy_name, + group_name=group_name, + electrode_list=np.array(lfp_constants.get("lfp_electrode_ids")), + ) + assert len( + electrodegroup_table & {"lfp_electrode_group_name": group_name} + ), "Failed to add LFPElectrodeGroup." + yield + + +@pytest.fixture(scope="session") +def add_interval(common, lfp_constants): + common.IntervalList.insert1( + lfp_constants.get("interval_key"), skip_duplicates=True + ) + yield lfp_constants.get("interval_list_name") + + +@pytest.fixture(scope="session") +def add_selection( + lfp, common, add_electrode_group, add_interval, lfp_constants +): + lfp_s_key = { + **lfp_constants.get("lfp_eg_key"), + "target_interval_list_name": add_interval, + "filter_name": lfp_constants.get("filter1_name"), + "filter_sampling_rate": lfp_constants.get("filter_sampling_rate"), + } + lfp.v1.LFPSelection.insert1(lfp_s_key, skip_duplicates=True) + yield lfp_s_key + + +@pytest.fixture(scope="session") +def lfp_s_key(lfp_constants, mini_copy_name): + yield { + "nwb_file_name": mini_copy_name, + "lfp_electrode_group_name": lfp_constants.get( + "lfp_electrode_group_name" + ), + "target_interval_list_name": lfp_constants.get("interval_list_name"), + } + + +@pytest.fixture(scope="session") +def populate_lfp(lfp, add_selection, lfp_s_key): + lfp.v1.LFPV1().populate(add_selection) + yield {"merge_id": (lfp.LFPOutput.LFPV1() & lfp_s_key).fetch1("merge_id")} + + +@pytest.fixture(scope="session") +def lfp_merge_key(populate_lfp): + yield populate_lfp + + +@pytest.fixture(scope="session") +def lfp_v1_key(lfp, lfp_s_key): + yield (lfp.v1.LFPV1 & lfp_s_key).fetch1("KEY") diff --git a/tests/container.py b/tests/container.py index 04e176fee..fa26f1c46 100644 --- a/tests/container.py +++ b/tests/container.py @@ -193,7 +193,7 @@ def creds(self): "database.user": self.user, "database.port": int(self.port), "safemode": "false", - "custom": {"test_mode": True}, + "custom": {"test_mode": True, "debug_mode": False}, } @property diff --git a/tests/lfp/conftest.py b/tests/lfp/conftest.py index 354803493..e62a03dea 100644 --- a/tests/lfp/conftest.py +++ b/tests/lfp/conftest.py @@ -1,140 +1,7 @@ -import numpy as np import pytest from pynwb import NWBHDF5IO -@pytest.fixture(scope="session") -def lfp(common): - from spyglass import lfp - - return lfp - - -@pytest.fixture(scope="session") -def lfp_band(lfp): - from spyglass.lfp.analysis.v1 import lfp_band - - return lfp_band - - -@pytest.fixture(scope="session") -def firfilters_table(common): - return common.FirFilterParameters() - - -@pytest.fixture(scope="session") -def electrodegroup_table(lfp): - return lfp.v1.LFPElectrodeGroup() - - -@pytest.fixture(scope="session") -def lfp_constants(common, mini_copy_name, mini_dict): - n_delay = 9 - lfp_electrode_group_name = "test" - orig_list_name = "01_s1" - orig_valid_times = ( - common.IntervalList - & mini_dict - & f"interval_list_name = '{orig_list_name}'" - ).fetch1("valid_times") - new_list_name = orig_list_name + f"_first{n_delay}" - new_list_key = { - "nwb_file_name": mini_copy_name, - "interval_list_name": new_list_name, - "valid_times": np.asarray( - [[orig_valid_times[0, 0], orig_valid_times[0, 0] + n_delay]] - ), - } - - yield dict( - lfp_electrode_ids=[0], - lfp_electrode_group_name=lfp_electrode_group_name, - lfp_eg_key={ - "nwb_file_name": mini_copy_name, - "lfp_electrode_group_name": lfp_electrode_group_name, - }, - n_delay=n_delay, - orig_interval_list_name=orig_list_name, - orig_valid_times=orig_valid_times, - 
interval_list_name=new_list_name, - interval_key=new_list_key, - filter1_name="LFP 0-400 Hz", - filter_sampling_rate=30_000, - filter2_name="Theta 5-11 Hz", - lfp_band_electrode_ids=[0], # assumes we've filtered these electrodes - lfp_band_sampling_rate=100, # desired sampling rate - ) - - -@pytest.fixture(scope="session") -def add_electrode_group( - firfilters_table, - electrodegroup_table, - mini_copy_name, - lfp_constants, -): - firfilters_table.create_standard_filters() - group_name = lfp_constants.get("lfp_electrode_group_name") - electrodegroup_table.create_lfp_electrode_group( - nwb_file_name=mini_copy_name, - group_name=group_name, - electrode_list=np.array(lfp_constants.get("lfp_electrode_ids")), - ) - assert len( - electrodegroup_table & {"lfp_electrode_group_name": group_name} - ), "Failed to add LFPElectrodeGroup." - yield - - -@pytest.fixture(scope="session") -def add_interval(common, lfp_constants): - common.IntervalList.insert1( - lfp_constants.get("interval_key"), skip_duplicates=True - ) - yield lfp_constants.get("interval_list_name") - - -@pytest.fixture(scope="session") -def add_selection( - lfp, common, add_electrode_group, add_interval, lfp_constants -): - lfp_s_key = { - **lfp_constants.get("lfp_eg_key"), - "target_interval_list_name": add_interval, - "filter_name": lfp_constants.get("filter1_name"), - "filter_sampling_rate": lfp_constants.get("filter_sampling_rate"), - } - lfp.v1.LFPSelection.insert1(lfp_s_key, skip_duplicates=True) - yield lfp_s_key - - -@pytest.fixture(scope="session") -def lfp_s_key(lfp_constants, mini_copy_name): - yield { - "nwb_file_name": mini_copy_name, - "lfp_electrode_group_name": lfp_constants.get( - "lfp_electrode_group_name" - ), - "target_interval_list_name": lfp_constants.get("interval_list_name"), - } - - -@pytest.fixture(scope="session") -def populate_lfp(lfp, add_selection, lfp_s_key): - lfp.v1.LFPV1().populate(add_selection) - yield {"merge_id": (lfp.LFPOutput.LFPV1() & lfp_s_key).fetch1("merge_id")} - - -@pytest.fixture(scope="session") -def lfp_merge_key(populate_lfp): - yield populate_lfp - - -@pytest.fixture(scope="session") -def lfp_v1_key(lfp, lfp_s_key): - yield (lfp.v1.LFPV1 & lfp_s_key).fetch1("KEY") - - @pytest.fixture(scope="module") def lfp_analysis_raw(common, lfp, populate_lfp, mini_dict): abs_path = (common.AnalysisNwbfile * lfp.v1.LFPV1 & mini_dict).fetch( diff --git a/tests/lfp/test_lfp.py b/tests/lfp/test_lfp.py index 51b2e96f4..b496ae445 100644 --- a/tests/lfp/test_lfp.py +++ b/tests/lfp/test_lfp.py @@ -37,11 +37,6 @@ def test_lfp_band_dataframe(lfp_band_analysis_raw, lfp_band, lfp_band_key): assert df_raw.equals(df_fetch), "LFPBand dataframe not match." 
-@pytest.fixture(scope="module") -def lfp_band_v1(lfp_band): - yield lfp_band.LFPBandV1() - - def test_lfp_band_compute_signal_invalid(lfp_band_v1): with pytest.raises(ValueError): lfp_band_v1.compute_analytic_signal([4]) diff --git a/tests/linearization/conftest.py b/tests/linearization/conftest.py deleted file mode 100644 index 505dcc816..000000000 --- a/tests/linearization/conftest.py +++ /dev/null @@ -1,142 +0,0 @@ -import numpy as np -import pytest - - -@pytest.fixture(scope="session") -def sgl(common): - from spyglass import linearization - - yield linearization - - -@pytest.fixture(scope="session") -def sgpl(sgl): - from spyglass.linearization import v1 - - yield v1 - - -@pytest.fixture(scope="session") -def pos_lin_key(trodes_sel_keys): - yield trodes_sel_keys[-1] - - -@pytest.fixture(scope="session") -def position_info(pos_merge, pos_merge_key): - yield (pos_merge & {"merge_id": pos_merge_key}).fetch1_dataframe() - - -@pytest.fixture(scope="session") -def track_graph_key(): - yield {"track_graph_name": "6 arm"} - - -@pytest.fixture(scope="session") -def track_graph(teardown, sgpl, track_graph_key): - node_positions = np.array( - [ - (79.910, 216.720), # top left well 0 - (132.031, 187.806), # top middle intersection 1 - (183.718, 217.713), # top right well 2 - (132.544, 132.158), # middle intersection 3 - (87.202, 101.397), # bottom left intersection 4 - (31.340, 126.110), # middle left well 5 - (180.337, 104.799), # middle right intersection 6 - (92.693, 42.345), # bottom left well 7 - (183.784, 45.375), # bottom right well 8 - (231.338, 136.281), # middle right well 9 - ] - ) - - edges = np.array( - [ - (0, 1), - (1, 2), - (1, 3), - (3, 4), - (4, 5), - (3, 6), - (6, 9), - (4, 7), - (6, 8), - ] - ) - - linear_edge_order = [ - (3, 6), - (6, 8), - (6, 9), - (3, 1), - (1, 2), - (1, 0), - (3, 4), - (4, 5), - (4, 7), - ] - linear_edge_spacing = 15 - - sgpl.TrackGraph.insert1( - { - **track_graph_key, - "environment": track_graph_key["track_graph_name"], - "node_positions": node_positions, - "edges": edges, - "linear_edge_order": linear_edge_order, - "linear_edge_spacing": linear_edge_spacing, - }, - skip_duplicates=True, - ) - - yield sgpl.TrackGraph & {"track_graph_name": "6 arm"} - if teardown: - sgpl.TrackGraph().delete(safemode=False) - - -@pytest.fixture(scope="session") -def lin_param_key(): - yield {"linearization_param_name": "default"} - - -@pytest.fixture(scope="session") -def lin_params( - teardown, - sgpl, - lin_param_key, -): - param_table = sgpl.LinearizationParameters() - param_table.insert1(lin_param_key, skip_duplicates=True) - yield param_table - - -@pytest.fixture(scope="session") -def lin_sel_key( - pos_merge_key, track_graph_key, lin_param_key, lin_params, track_graph -): - yield { - "pos_merge_id": pos_merge_key["merge_id"], - **track_graph_key, - **lin_param_key, - } - - -@pytest.fixture(scope="session") -def lin_sel(teardown, sgpl, lin_sel_key): - sel_table = sgpl.LinearizationSelection() - sel_table.insert1(lin_sel_key, skip_duplicates=True) - yield sel_table - if teardown: - sel_table.delete(safemode=False) - - -@pytest.fixture(scope="session") -def lin_v1(teardown, sgpl, lin_sel): - v1 = sgpl.LinearizedPositionV1() - v1.populate() - yield v1 - if teardown: - v1.delete(safemode=False) - - -@pytest.fixture(scope="session") -def lin_merge_key(lin_merge, lin_sel_key): - yield lin_merge.merge_get_part(lin_sel_key).fetch1("KEY") diff --git a/tests/linearization/test_lin.py b/tests/linearization/test_lin.py index 4225ad5bf..a5db28d9a 100644 --- 
a/tests/linearization/test_lin.py +++ b/tests/linearization/test_lin.py @@ -9,4 +9,4 @@ def test_fetch1_dataframe(lin_v1, lin_merge, lin_merge_key): assert hash_df == hash_exp, "Dataframe differs from expected" -## Todo: Add more tests of this pipeline, not just the fetch1_dataframe method +# TODO: Add more tests of this pipeline, not just the fetch1_dataframe method diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/utils/conftest.py b/tests/utils/conftest.py index 3503f9649..a4bc7f900 100644 --- a/tests/utils/conftest.py +++ b/tests/utils/conftest.py @@ -38,6 +38,8 @@ def chains(Nwbfile): ) # noqa: F401 from spyglass.position.position_merge import PositionOutput # noqa: F401 + _ = LFPOutput, LinearizedPositionOutput, PositionOutput + yield Nwbfile._get_chain("linear") @@ -51,6 +53,243 @@ def chain(chains): def no_link_chain(Nwbfile): """Return example TableChain object with no link.""" from spyglass.common.common_usage import InsertError - from spyglass.utils.dj_chains import TableChain + from spyglass.utils.dj_graph import TableChain yield TableChain(Nwbfile, InsertError()) + + +@pytest.fixture(scope="module") +def _Merge(): + """Return the _Merge class.""" + from spyglass.utils import _Merge + + yield _Merge + + +@pytest.fixture(scope="module") +def SpyglassMixin(): + """Return a mixin class.""" + from spyglass.utils import SpyglassMixin + + yield SpyglassMixin + + +@pytest.fixture(scope="module") +def graph_schema(SpyglassMixin, _Merge): + """ + NOTE: Must declare tables within fixture to avoid loading config defaults. + """ + parent_id = range(10) + parent_attr = [i + 10 for i in range(2, 12)] + other_id = range(9) + other_attr = [i + 10 for i in range(3, 12)] + intermediate_id = range(2, 10) + intermediate_attr = [i + 10 for i in range(4, 12)] + pk_id = range(3, 10) + pk_attr = [i + 10 for i in range(5, 12)] + sk_id = range(6) + sk_attr = [i + 10 for i in range(6, 12)] + pk_sk_id = range(5) + pk_sk_attr = [i + 10 for i in range(7, 12)] + pk_alias_id = range(4) + pk_alias_attr = [i + 10 for i in range(8, 12)] + sk_alias_id = range(3) + sk_alias_attr = [i + 10 for i in range(9, 12)] + + def offset(gen, offset): + return list(gen)[offset:] + + class ParentNode(SpyglassMixin, dj.Lookup): + definition = """ + parent_id: int + --- + parent_attr : int + """ + contents = [(i, j) for i, j in zip(parent_id, parent_attr)] + + class OtherParentNode(SpyglassMixin, dj.Lookup): + definition = """ + other_id: int + --- + other_attr : int + """ + contents = [(i, j) for i, j in zip(other_id, other_attr)] + + class IntermediateNode(SpyglassMixin, dj.Lookup): + definition = """ + intermediate_id: int + --- + -> ParentNode + intermediate_attr : int + """ + contents = [ + (i, j, k) + for i, j, k in zip( + intermediate_id, offset(parent_id, 1), intermediate_attr + ) + ] + + class PkNode(SpyglassMixin, dj.Lookup): + definition = """ + pk_id: int + -> IntermediateNode + --- + pk_attr : int + """ + contents = [ + (i, j, k) + for i, j, k in zip(pk_id, offset(intermediate_id, 2), pk_attr) + ] + + class SkNode(SpyglassMixin, dj.Lookup): + definition = """ + sk_id: int + --- + -> IntermediateNode + sk_attr : int + """ + contents = [ + (i, j, k) + for i, j, k in zip(sk_id, offset(intermediate_id, 3), sk_attr) + ] + + class PkSkNode(SpyglassMixin, dj.Lookup): + definition = """ + pk_sk_id: int + -> IntermediateNode + --- + -> OtherParentNode + pk_sk_attr : int + """ + contents = [ + (i, j, k, m) + for i, j, k, m in zip( + pk_sk_id, 
offset(intermediate_id, 4), other_id, pk_sk_attr + ) + ] + + class PkAliasNode(SpyglassMixin, dj.Lookup): + definition = """ + pk_alias_id: int + -> PkNode.proj(fk_pk_id='pk_id') + --- + pk_alias_attr : int + """ + contents = [ + (i, j, k, m) + for i, j, k, m in zip( + pk_alias_id, + offset(pk_id, 1), + offset(intermediate_id, 3), + pk_alias_attr, + ) + ] + + class SkAliasNode(SpyglassMixin, dj.Lookup): + definition = """ + sk_alias_id: int + --- + -> SkNode.proj(fk_sk_id='sk_id') + -> PkSkNode + sk_alias_attr : int + """ + contents = [ + (i, j, k, m, n) + for i, j, k, m, n in zip( + sk_alias_id, + offset(sk_id, 2), + offset(pk_sk_id, 1), + offset(intermediate_id, 5), + sk_alias_attr, + ) + ] + + class MergeOutput(_Merge, SpyglassMixin): + definition = """ + merge_id: uuid + --- + source: varchar(32) + """ + + class PkNode(dj.Part): + definition = """ + -> MergeOutput + --- + -> PkNode + """ + + class MergeChild(SpyglassMixin, dj.Manual): + definition = """ + -> MergeOutput + merge_child_id: int + --- + merge_child_attr: int + """ + + yield { + "ParentNode": ParentNode, + "OtherParentNode": OtherParentNode, + "IntermediateNode": IntermediateNode, + "PkNode": PkNode, + "SkNode": SkNode, + "PkSkNode": PkSkNode, + "PkAliasNode": PkAliasNode, + "SkAliasNode": SkAliasNode, + "MergeOutput": MergeOutput, + "MergeChild": MergeChild, + } + + +@pytest.fixture(scope="module") +def graph_tables(dj_conn, graph_schema): + + schema = dj.Schema(context=graph_schema) + + for table in graph_schema.values(): + schema(table) + + schema.activate("test_graph", connection=dj_conn) + + # Merge inserts after declaring tables + merge_keys = graph_schema["PkNode"].fetch("KEY", offset=1, as_dict=True) + graph_schema["MergeOutput"].insert(merge_keys, skip_duplicates=True) + merge_child_keys = graph_schema["MergeOutput"].merge_fetch( + True, "merge_id", offset=1 + ) + merge_child_inserts = [ + (i, j, k + 10) + for i, j, k in zip(merge_child_keys, range(4), range(10, 15)) + ] + graph_schema["MergeChild"].insert(merge_child_inserts, skip_duplicates=True) + + yield graph_schema + + schema.drop(force=True) + + +@pytest.fixture(scope="module") +def graph_tables_many_to_one(graph_tables): + ParentNode = graph_tables["ParentNode"] + IntermediateNode = graph_tables["IntermediateNode"] + PkSkNode = graph_tables["PkSkNode"] + + pk_sk_keys = PkSkNode().fetch(as_dict=True)[-2:] + new_inserts = [ + { + "pk_sk_id": k["pk_sk_id"] + 3, + "intermediate_id": k["intermediate_id"] + 3, + "intermediate_attr": k["intermediate_id"] + 16, + "parent_id": k["intermediate_id"] - 1, + "parent_attr": k["intermediate_id"] + 11, + "other_id": k["other_id"], # No change + "pk_sk_attr": k["pk_sk_attr"] + 10, + } + for k in pk_sk_keys + ] + + insert_kwargs = {"ignore_extra_fields": True, "skip_duplicates": True} + ParentNode.insert(new_inserts, **insert_kwargs) + IntermediateNode.insert(new_inserts, **insert_kwargs) + PkSkNode.insert(new_inserts, **insert_kwargs) + + yield graph_tables diff --git a/tests/utils/test_chains.py b/tests/utils/test_chains.py index 7ba4b1fa2..66d9772c3 100644 --- a/tests/utils/test_chains.py +++ b/tests/utils/test_chains.py @@ -4,15 +4,20 @@ @pytest.fixture(scope="session") def TableChain(): - from spyglass.utils.dj_chains import TableChain + from spyglass.utils.dj_graph import TableChain return TableChain +def full_to_camel(t): + return to_camel_case(t.split(".")[-1].strip("`")) + + def test_chains_repr(chains): """Test that the repr of a TableChains object is as expected.""" repr_got = repr(chains) - repr_exp = 
"\n".join([str(c) for c in chains.chains]) + chain_st = ",\n\t".join([str(c) for c in chains.chains]) + "\n" + repr_exp = f"TableChains(\n\t{chain_st})" assert repr_got == repr_exp, "Unexpected repr of TableChains object." @@ -32,11 +37,13 @@ def test_invalid_chain(Nwbfile, pos_merge_tables, TableChain): def test_chain_str(chain): """Test that the str of a TableChain object is as expected.""" chain = chain - parent = to_camel_case(chain.parent.table_name) - child = to_camel_case(chain.child.table_name) str_got = str(chain) - str_exp = parent + chain._link_symbol + child + str_exp = ( + full_to_camel(chain.parent) + + chain._link_symbol + + full_to_camel(chain.child) + ) assert str_got == str_exp, "Unexpected str of TableChain object." @@ -45,25 +52,25 @@ def test_chain_repr(chain): """Test that the repr of a TableChain object is as expected.""" repr_got = repr(chain) repr_ext = "Chain: " + chain._link_symbol.join( - [t.table_name for t in chain.objects] + [full_to_camel(t) for t in chain.path] ) assert repr_got == repr_ext, "Unexpected repr of TableChain object." def test_chain_len(chain): """Test that the len of a TableChain object is as expected.""" - assert len(chain) == len(chain.names), "Unexpected len of TableChain." + assert len(chain) == len(chain.path), "Unexpected len of TableChain." def test_chain_getitem(chain): """Test getitem of TableChain object.""" by_int = chain[0] - by_str = chain[chain.names[0]] + by_str = chain[chain.path[0]] assert by_int == by_str, "Getitem by int and str not equal." def test_nolink_join(no_link_chain): - assert no_link_chain.join() is None, "Unexpected join of no link chain." + assert no_link_chain.cascade() is None, "Unexpected join of no link chain." def test_chain_str_no_link(no_link_chain): diff --git a/tests/utils/test_graph.py b/tests/utils/test_graph.py new file mode 100644 index 000000000..7d5257a36 --- /dev/null +++ b/tests/utils/test_graph.py @@ -0,0 +1,143 @@ +import pytest + + +@pytest.fixture(scope="session") +def leaf(lin_merge): + yield lin_merge.LinearizedPositionV1() + + +@pytest.fixture(scope="session") +def restr_graph(leaf, verbose, lin_merge_key): + from spyglass.utils.dj_graph import RestrGraph + + _ = lin_merge_key # linearization merge table populated + + yield RestrGraph( + seed_table=leaf, + table_name=leaf.full_table_name, + restriction=True, + cascade=True, + verbose=verbose, + ) + + +def test_rg_repr(restr_graph, leaf): + """Test that the repr of a RestrGraph object is as expected.""" + repr_got = repr(restr_graph) + + assert "cascade" in repr_got.lower(), "Cascade not in repr." + assert leaf.full_table_name in repr_got, "Table name not in repr." + + +def test_rg_ft(restr_graph): + """Test FreeTable attribute of RestrGraph.""" + assert len(restr_graph.leaf_ft) == 1, "Unexpected # of leaf tables." + assert len(restr_graph["spatial"]) == 2, "Unexpected cascaded table length." + + +def test_rg_restr_ft(restr_graph): + """Test get restricted free tables.""" + ft = restr_graph["spatial_series"] + assert len(ft) == 2, "Unexpected restricted table length." + + +def test_rg_file_paths(restr_graph): + """Test collection of upstream file paths.""" + paths = [p.get("file_path") for p in restr_graph.file_paths] + assert len(paths) == 2, "Unexpected number of file paths." 
+ + +@pytest.fixture(scope="session") +def restr_graph_new_leaf(restr_graph, common): + restr_graph.add_leaf( + table_name=common.common_behav.PositionSource.full_table_name, + restriction=True, + ) + + yield restr_graph + + +def test_add_leaf_cascade(restr_graph_new_leaf): + assert ( + not restr_graph_new_leaf.cascaded + ), "Cascaded flag not set when add leaf." + + +def test_add_leaf_restr_ft(restr_graph_new_leaf): + restr_graph_new_leaf.cascade() + ft = restr_graph_new_leaf._get_ft( + "`common_interval`.`interval_list`", with_restr=True + ) + assert len(ft) == 2, "Unexpected restricted table length." + + +@pytest.fixture(scope="session") +def restr_graph_root(restr_graph, common, lfp_band, lin_v1): + from spyglass.utils.dj_graph import RestrGraph + + yield RestrGraph( + seed_table=common.Session(), + table_name=common.Session.full_table_name, + restriction="True", + direction="down", + cascade=True, + verbose=False, + ) + + +def test_rg_root(restr_graph_root): + assert ( + len(restr_graph_root["trodes_pos_v1"]) == 2 + ), "Incomplete cascade from root." + + +@pytest.mark.parametrize( + "restr, expect_n, msg", + [ + ("pk_attr > 16", 4, "pk no alias"), + ("sk_attr > 17", 3, "sk no alias"), + ("pk_alias_attr > 18", 3, "pk pk alias"), + ("sk_alias_attr > 19", 2, "sk sk alias"), + ("merge_child_attr > 21", 2, "merge child down"), + ({"merge_child_attr": 21}, 1, "dict restr"), + ], +) +def test_restr_from_upstream(graph_tables, restr, expect_n, msg): + msg = "Error in `>>` for " + msg + assert len(graph_tables["ParentNode"]() >> restr) == expect_n, msg + + +@pytest.mark.parametrize( + "table, restr, expect_n, msg", + [ + ("PkNode", "parent_attr > 15", 5, "pk no alias"), + ("SkNode", "parent_attr > 16", 4, "sk no alias"), + ("PkAliasNode", "parent_attr > 17", 2, "pk pk alias"), + ("SkAliasNode", "parent_attr > 18", 2, "sk sk alias"), + ("MergeChild", "parent_attr > 18", 2, "merge child"), + ("MergeChild", {"parent_attr": 18}, 1, "dict restr"), + ], +) +def test_restr_from_downstream(graph_tables, table, restr, expect_n, msg): + msg = "Error in `<<` for " + msg + assert len(graph_tables[table]() << restr) == expect_n, msg + + +def test_restr_many_to_one(graph_tables_many_to_one): + PK = graph_tables_many_to_one["PkSkNode"]() + OP = graph_tables_many_to_one["OtherParentNode"]() + + msg_template = "Error in `%s` for many to one." + + assert len(PK << "other_attr > 14") == 4, msg_template % "<<" + assert len(PK << {"other_attr": 15}) == 2, msg_template % "<<" + assert len(OP >> "pk_sk_attr > 19") == 2, msg_template % ">>" + assert ( + len(OP >> [{"pk_sk_attr": 19}, {"pk_sk_attr": 20}]) == 2 + ), "Error accepting list of dicts for `>>` for many to one." 
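The parametrized cases above pin down the semantics of the new operators on the toy schema: `>>` pushes a restriction from a table down to the tables that reference it, while `<<` pulls a restriction up from a downstream table. A hypothetical snippet inside a test that requests the `graph_tables` fixture (expected row counts are taken directly from the parameters above):

    ParentNode = graph_tables["ParentNode"]()
    PkNode = graph_tables["PkNode"]()

    # Downstream: restrict ParentNode by an attribute that only exists in PkNode.
    assert len(ParentNode >> "pk_attr > 16") == 4

    # Upstream: restrict PkNode by an attribute that only exists in ParentNode.
    assert len(PkNode << "parent_attr > 15") == 5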
+ + +def test_restr_invalid(graph_tables): + PkNode = graph_tables["PkNode"]() + with pytest.raises(ValueError): + len(PkNode << set(["parent_attr > 15", "parent_attr < 20"])) diff --git a/tests/utils/test_mixin.py b/tests/utils/test_mixin.py index faa823c8e..010abf03c 100644 --- a/tests/utils/test_mixin.py +++ b/tests/utils/test_mixin.py @@ -1,7 +1,7 @@ import datajoint as dj import pytest -from tests.conftest import VERBOSE +from tests.conftest import TEARDOWN, VERBOSE @pytest.fixture(scope="module") @@ -16,7 +16,10 @@ class Mixin(SpyglassMixin, dj.Manual): yield Mixin -@pytest.mark.skipif(not VERBOSE, reason="No logging to test when quiet-spy.") +@pytest.mark.skipif( + not VERBOSE or not TEARDOWN, + reason="Error only on verbose or new declare.", +) def test_bad_prefix(caplog, dj_conn, Mixin): schema_bad = dj.Schema("badprefix", {}, connection=dj_conn) schema_bad(Mixin) @@ -38,6 +41,19 @@ def test_merge_detect(Nwbfile, pos_merge_tables): ), "Merges not detected by mixin." +def test_merge_chain_join(Nwbfile, pos_merge_tables, lin_v1, lfp_merge_key): + """Test that the mixin can join merge chains.""" + _ = lin_v1, lfp_merge_key # merge tables populated + + all_chains = [ + chains.cascade(True, direction="down") + for chains in Nwbfile._merge_chains.values() + ] + end_len = [len(chain[0]) for chain in all_chains if chain] + + assert sum(end_len) == 4, "Merge chains not joined correctly." + + def test_get_chain(Nwbfile, pos_merge_tables): """Test that the mixin can get the chain of a merge.""" lin_parts = Nwbfile._get_chain("linear").part_names @@ -48,7 +64,28 @@ def test_get_chain(Nwbfile, pos_merge_tables): @pytest.mark.skipif(not VERBOSE, reason="No logging to test when quiet-spy.") def test_ddm_warning(Nwbfile, caplog): """Test that the mixin warns on empty delete_downstream_merge.""" - (Nwbfile & "nwb_file_name LIKE 'BadName'").delete_downstream_merge( + (Nwbfile.file_like("BadName")).delete_downstream_merge( reload_cache=True, disable_warnings=False ) assert "No merge deletes found" in caplog.text, "No warning issued." + + +def test_ddm_dry_run(Nwbfile, common, sgp, pos_merge_tables, lin_v1): + """Test that the mixin can dry run delete_downstream_merge.""" + _ = lin_v1 # merge tables populated + pos_output_name = pos_merge_tables[0].full_table_name + + param_field = "trodes_pos_params_name" + trodes_params = sgp.v1.TrodesPosParams() + + rft = (trodes_params & f'{param_field} LIKE "%ups%"').ddm( + reload_cache=True, dry_run=True, return_parts=False + )[pos_output_name][0] + assert len(rft) == 1, "ddm did not return restricted table." + + table_name = [p for p in pos_merge_tables[0].parts() if "trode" in p][0] + assert table_name == rft.full_table_name, "ddm didn't grab right table." + + assert ( + rft.fetch1(param_field) == "single_led_upsampled" + ), "ddm didn't grab right row." 
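The dry-run check above also doubles as documentation of the intended interactive workflow. A sketch, reusing the fixture names from the test (`sgp` for the position package) and the same illustrative restriction; the return shape follows what the assertions above rely on rather than a separately documented API:

```python
trodes_params = sgp.v1.TrodesPosParams()  # sgp: spyglass position package, as in the fixtures
restricted = trodes_params & 'trodes_pos_params_name LIKE "%ups%"'

# With dry_run=True nothing is deleted; the result maps each downstream merge table
# name to the restricted FreeTables that would be removed by a real delete.
would_delete = restricted.ddm(reload_cache=True, dry_run=True, return_parts=False)
for merge_name, free_tables in would_delete.items():
    print(merge_name, [ft.full_table_name for ft in free_tables])
```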
From 042fd1cd631f2accd0ed0f25544898c628c65075 Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Fri, 10 May 2024 12:05:54 -0700 Subject: [PATCH 34/60] Transaction on `populate_all_common` (#957) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * WIP: transaction on populate_all_common * ✅ : Seperate rollback and raise err options --- CHANGELOG.md | 1 + notebooks/01_Insert_Data.ipynb | 20 ++- notebooks/py_scripts/01_Insert_Data.py | 18 ++- notebooks/py_scripts/50_MUA_Detection.py | 111 +++++++++++++ src/spyglass/common/common_behav.py | 28 +++- src/spyglass/common/common_dio.py | 15 +- src/spyglass/common/common_ephys.py | 128 ++++++++++----- src/spyglass/common/common_nwbfile.py | 1 + src/spyglass/common/common_session.py | 7 + src/spyglass/common/common_task.py | 10 +- src/spyglass/common/populate_all_common.py | 171 +++++++++++++++----- src/spyglass/data_import/insert_sessions.py | 16 +- src/spyglass/spikesorting/imported.py | 9 +- 13 files changed, 441 insertions(+), 94 deletions(-) create mode 100644 notebooks/py_scripts/50_MUA_Detection.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 231e328d6..bf8804795 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - Create class `SpyglassGroupPart` to aid delete propagations #899 - Fix bug report template #955 +- Add rollback option to `populate_all_common` #957 - Add long-distance restrictions via `<<` and `>>` operators. #943 - Fix relative pathing for `mkdocstring-python=>1.9.1`. #967, #968 diff --git a/notebooks/01_Insert_Data.ipynb b/notebooks/01_Insert_Data.ipynb index e68623e72..2a2297642 100644 --- a/notebooks/01_Insert_Data.ipynb +++ b/notebooks/01_Insert_Data.ipynb @@ -1082,8 +1082,22 @@ "- neural activity (extracellular recording of multiple brain areas)\n", "- etc.\n", "\n", - "_Note:_ this may take time as Spyglass creates the copy. You may see a prompt\n", - "about inserting device information.\n" + "_Notes:_ this may take time as Spyglass creates the copy. You may see a prompt\n", + "about inserting device information.\n", + "\n", + "By default, the session insert process is error permissive. It will log an\n", + "error and continue attempts across various tables. You have two options you can\n", + "toggle to adjust this.\n", + "\n", + "- `rollback_on_fail`: Default False. If True, errors will still be logged for\n", + " all tables and, if any are registered, the `Nwbfile` entry will be deleted.\n", + " This is helpful for knowing why your file failed, and making it easy to retry.\n", + "- `raise_err`: Default False. If True, errors will not be logged and will\n", + " instead be raised. This is useful for debugging and exploring the error stack.\n", + " The end result may be that some tables may still have entries from this file\n", + " that will need to be manually deleted after a failed attempt. 'transactions'\n", + " are used where possible to rollback sibling tables, but child table errors\n", + " will still leave entries from parent tables.\n" ] }, { @@ -1146,7 +1160,7 @@ } ], "source": [ - "sgi.insert_sessions(nwb_file_name)" + "sgi.insert_sessions(nwb_file_name, rollback_on_fail=False, raise_error=False)" ] }, { diff --git a/notebooks/py_scripts/01_Insert_Data.py b/notebooks/py_scripts/01_Insert_Data.py index 975ed4ac5..870c6907a 100644 --- a/notebooks/py_scripts/01_Insert_Data.py +++ b/notebooks/py_scripts/01_Insert_Data.py @@ -198,11 +198,25 @@ # - neural activity (extracellular recording of multiple brain areas) # - etc. 
# -# _Note:_ this may take time as Spyglass creates the copy. You may see a prompt +# _Notes:_ this may take time as Spyglass creates the copy. You may see a prompt # about inserting device information. # +# By default, the session insert process is error permissive. It will log an +# error and continue attempts across various tables. You have two options you can +# toggle to adjust this. +# +# - `rollback_on_fail`: Default False. If True, errors will still be logged for +# all tables and, if any are registered, the `Nwbfile` entry will be deleted. +# This is helpful for knowing why your file failed, and making it easy to retry. +# - `raise_err`: Default False. If True, errors will not be logged and will +# instead be raised. This is useful for debugging and exploring the error stack. +# The end result may be that some tables may still have entries from this file +# that will need to be manually deleted after a failed attempt. 'transactions' +# are used where possible to rollback sibling tables, but child table errors +# will still leave entries from parent tables. +# -sgi.insert_sessions(nwb_file_name) +sgi.insert_sessions(nwb_file_name, rollback_on_fail=False, raise_error=False) # ## Inspecting the data # diff --git a/notebooks/py_scripts/50_MUA_Detection.py b/notebooks/py_scripts/50_MUA_Detection.py new file mode 100644 index 000000000..bc319ff82 --- /dev/null +++ b/notebooks/py_scripts/50_MUA_Detection.py @@ -0,0 +1,111 @@ +# --- +# jupyter: +# jupytext: +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.16.0 +# kernelspec: +# display_name: spyglass +# language: python +# name: python3 +# --- + +# + +import datajoint as dj +from pathlib import Path + +dj.config.load( + Path("../dj_local_conf.json").absolute() +) # load config for database connection info + +from spyglass.mua.v1.mua import MuaEventsV1, MuaEventsParameters + +# - + +MuaEventsParameters() + +MuaEventsV1() + +# + +from spyglass.position import PositionOutput + +nwb_copy_file_name = "mediumnwb20230802_.nwb" + +trodes_s_key = { + "nwb_file_name": nwb_copy_file_name, + "interval_list_name": "pos 0 valid times", + "trodes_pos_params_name": "single_led_upsampled", +} + +pos_merge_id = (PositionOutput.TrodesPosV1 & trodes_s_key).fetch1("merge_id") +pos_merge_id + +# + +from spyglass.spikesorting.analysis.v1.group import ( + SortedSpikesGroup, +) + +sorted_spikes_group_key = { + "nwb_file_name": nwb_copy_file_name, + "sorted_spikes_group_name": "test_group", + "unit_filter_params_name": "default_exclusion", +} + +SortedSpikesGroup & sorted_spikes_group_key + +# + +mua_key = { + "mua_param_name": "default", + **sorted_spikes_group_key, + "pos_merge_id": pos_merge_id, + "detection_interval": "pos 0 valid times", +} + +MuaEventsV1().populate(mua_key) +MuaEventsV1 & mua_key +# - + +mua_times = (MuaEventsV1 & mua_key).fetch1_dataframe() +mua_times + +# + +import matplotlib.pyplot as plt +import numpy as np + +fig, axes = plt.subplots(2, 1, sharex=True, figsize=(15, 4)) +speed = MuaEventsV1.get_speed(mua_key).to_numpy() +time = speed.index.to_numpy() +multiunit_firing_rate = MuaEventsV1.get_firing_rate(mua_key, time) + +time_slice = slice( + np.searchsorted(time, mua_times.loc[10].start_time) - 1_000, + np.searchsorted(time, mua_times.loc[10].start_time) + 5_000, +) + +axes[0].plot( + time[time_slice], + multiunit_firing_rate[time_slice], + color="black", +) +axes[0].set_ylabel("firing rate (Hz)") +axes[0].set_title("multiunit") +axes[1].fill_between(time[time_slice], 
speed[time_slice], color="lightgrey") +axes[1].set_ylabel("speed (cm/s)") +axes[1].set_xlabel("time (s)") + +for id, mua_time in mua_times.loc[ + np.logical_and( + mua_times["start_time"] > time[time_slice].min(), + mua_times["end_time"] < time[time_slice].max(), + ) +].iterrows(): + axes[0].axvspan( + mua_time["start_time"], mua_time["end_time"], color="red", alpha=0.5 + ) +# - + +(MuaEventsV1 & mua_key).create_figurl( + zscore_mua=True, +) diff --git a/src/spyglass/common/common_behav.py b/src/spyglass/common/common_behav.py index bdb769e73..b7e8d953b 100644 --- a/src/spyglass/common/common_behav.py +++ b/src/spyglass/common/common_behav.py @@ -43,12 +43,8 @@ class SpatialSeries(SpyglassMixin, dj.Part): name=null: varchar(32) # name of spatial series """ - def populate(self, keys=None): - """Insert position source data from NWB file. - - WARNING: populate method on Manual table is not protected by transaction - protections like other DataJoint tables. - """ + def _no_transaction_make(self, keys=None): + """Insert position source data from NWB file.""" if not isinstance(keys, list): keys = [keys] if isinstance(keys[0], (dj.Table, dj.expression.QueryExpression)): @@ -227,6 +223,12 @@ def _get_column_names(rp, pos_id): return column_names def make(self, key): + self._no_transaction_make(key) + + def _no_transaction_make(self, key): + """Make without transaction + + Allows populate_all_common to work within a single transaction.""" nwb_file_name = key["nwb_file_name"] interval_list_name = key["interval_list_name"] @@ -238,7 +240,7 @@ def make(self, key): PositionSource.get_epoch_num(interval_list_name) ] - self.insert1(key) + self.insert1(key, allow_direct_insert=True) self.PosObject.insert( [ dict( @@ -294,6 +296,12 @@ class StateScriptFile(SpyglassMixin, dj.Imported): _nwb_table = Nwbfile def make(self, key): + self._no_transaction_make(key) + + def _no_transaction_make(self, key): + """Make without transaction + + Allows populate_all_common to work within a single transaction.""" """Add a new row to the StateScriptFile table.""" nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) @@ -309,6 +317,7 @@ def make(self, key): ) return # See #849 + script_inserts = [] for associated_file_obj in associated_files.data_interfaces.values(): if not isinstance( associated_file_obj, ndx_franklab_novela.AssociatedFiles @@ -337,10 +346,13 @@ def make(self, key): # find the file associated with this epoch if str(key["epoch"]) in epoch_list: key["file_object_id"] = associated_file_obj.object_id - self.insert1(key) + script_inserts.append(key.copy()) else: logger.info("not a statescript file") + if script_inserts: + self.insert(script_inserts, allow_direct_insert=True) + @schema class VideoFile(SpyglassMixin, dj.Imported): diff --git a/src/spyglass/common/common_dio.py b/src/spyglass/common/common_dio.py index 3db854e6a..629adef47 100644 --- a/src/spyglass/common/common_dio.py +++ b/src/spyglass/common/common_dio.py @@ -27,6 +27,12 @@ class DIOEvents(SpyglassMixin, dj.Imported): _nwb_table = Nwbfile def make(self, key): + self._no_transaction_make(key) + + def _no_transaction_make(self, key): + """Make without transaction + + Allows populate_all_common to work within a single transaction.""" nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) @@ -45,10 +51,17 @@ def make(self, key): key["interval_list_name"] = ( Raw() & {"nwb_file_name": nwb_file_name} ).fetch1("interval_list_name") + + 
dio_inserts = [] for event_series in behav_events.time_series.values(): key["dio_event_name"] = event_series.name key["dio_object_id"] = event_series.object_id - self.insert1(key, skip_duplicates=True) + dio_inserts.append(key.copy()) + self.insert( + dio_inserts, + skip_duplicates=True, + allow_direct_insert=True, + ) def plot_all_dio_events(self, return_fig=False): """Plot all DIO events in the session. diff --git a/src/spyglass/common/common_ephys.py b/src/spyglass/common/common_ephys.py index 1880340a9..d03f6edff 100644 --- a/src/spyglass/common/common_ephys.py +++ b/src/spyglass/common/common_ephys.py @@ -45,6 +45,12 @@ class ElectrodeGroup(SpyglassMixin, dj.Imported): """ def make(self, key): + self._no_transaction_make(key) + + def _no_transaction_make(self, key): + """Make without transaction + + Allows populate_all_common to work within a single transaction.""" nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) @@ -69,7 +75,7 @@ def make(self, key): else: # if negative x coordinate # define target location as left hemisphere key["target_hemisphere"] = "Left" - self.insert1(key, skip_duplicates=True) + self.insert1(key, skip_duplicates=True, allow_direct_insert=True) @schema @@ -95,6 +101,12 @@ class Electrode(SpyglassMixin, dj.Imported): """ def make(self, key): + self._no_transaction_make(key) + + def _no_transaction_make(self, key): + """Make without transaction + + Allows populate_all_common to work within a single transaction.""" nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) @@ -108,23 +120,32 @@ def make(self, key): else: electrode_config_dicts = dict() + electrode_constants = { + "x_warped": 0, + "y_warped": 0, + "z_warped": 0, + "contacts": "", + } + + electrode_inserts = [] electrodes = nwbf.electrodes.to_dataframe() for elect_id, elect_data in electrodes.iterrows(): - key["electrode_id"] = elect_id - key["name"] = str(elect_id) - key["electrode_group_name"] = elect_data.group_name - key["region_id"] = BrainRegion.fetch_add( - region_name=elect_data.group.location + key.update( + { + "electrode_id": elect_id, + "name": str(elect_id), + "electrode_group_name": elect_data.group_name, + "region_id": BrainRegion.fetch_add( + region_name=elect_data.group.location + ), + "x": elect_data.x, + "y": elect_data.y, + "z": elect_data.z, + "filtering": elect_data.filtering, + "impedance": elect_data.get("imp"), + **electrode_constants, + } ) - key["x"] = elect_data.x - key["y"] = elect_data.y - key["z"] = elect_data.z - key["x_warped"] = 0 - key["y_warped"] = 0 - key["z_warped"] = 0 - key["contacts"] = "" - key["filtering"] = elect_data.filtering - key["impedance"] = elect_data.get("imp") # rough check of whether the electrodes table was created by # rec_to_nwb and has the appropriate custom columns used by @@ -140,13 +161,17 @@ def make(self, key): and "bad_channel" in elect_data and "ref_elect_id" in elect_data ): - key["probe_id"] = elect_data.group.device.probe_type - key["probe_shank"] = elect_data.probe_shank - key["probe_electrode"] = elect_data.probe_electrode - key["bad_channel"] = ( - "True" if elect_data.bad_channel else "False" + key.update( + { + "probe_id": elect_data.group.device.probe_type, + "probe_shank": elect_data.probe_shank, + "probe_electrode": elect_data.probe_electrode, + "bad_channel": ( + "True" if elect_data.bad_channel else "False" + ), + "original_reference_electrode": elect_data.ref_elect_id, + 
} ) - key["original_reference_electrode"] = elect_data.ref_elect_id # override with information from the config YAML based on primary # key (electrode id) @@ -163,8 +188,13 @@ def make(self, key): ) else: key.update(electrode_config_dicts[elect_id]) + electrode_inserts.append(key.copy()) - self.insert1(key, skip_duplicates=True) + self.insert1( + key, + skip_duplicates=True, + allow_direct_insert=True, # for no_transaction, pop_all_common + ) @classmethod def create_from_config(cls, nwb_file_name: str): @@ -246,10 +276,17 @@ class Raw(SpyglassMixin, dj.Imported): _nwb_table = Nwbfile def make(self, key): + self._no_transaction_make(key) + + def _no_transaction_make(self, key): + """Make without transaction + + Allows populate_all_common to work within a single transaction.""" nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) raw_interval_name = "raw data valid times" + # get the acquisition object try: # TODO this assumes there is a single item in NWBFile.acquisition @@ -261,19 +298,21 @@ def make(self, key): + f"Skipping entry in {self.full_table_name}" ) return + if rawdata.rate is not None: - sampling_rate = rawdata.rate + key["sampling_rate"] = rawdata.rate else: logger.info("Estimating sampling rate...") # NOTE: Only use first 1e6 timepoints to save time - sampling_rate = estimate_sampling_rate( + key["sampling_rate"] = estimate_sampling_rate( np.asarray(rawdata.timestamps[: int(1e6)]), 1.5, verbose=True ) - key["sampling_rate"] = sampling_rate - interval_dict = dict() - interval_dict["nwb_file_name"] = key["nwb_file_name"] - interval_dict["interval_list_name"] = raw_interval_name + interval_dict = { + "nwb_file_name": key["nwb_file_name"], + "interval_list_name": raw_interval_name, + } + if rawdata.rate is not None: interval_dict["valid_times"] = np.array( [[0, len(rawdata.data) / rawdata.rate]] @@ -291,18 +330,25 @@ def make(self, key): # now insert each of the electrodes as an individual row, but with the # same nwb_object_id - key["raw_object_id"] = rawdata.object_id - key["sampling_rate"] = sampling_rate logger.info( - f'Importing raw data: Sampling rate:\t{key["sampling_rate"]} Hz' + f'Importing raw data: Sampling rate:\t{key["sampling_rate"]} Hz\n' + + f'Number of valid intervals:\t{len(interval_dict["valid_times"])}' ) - logger.info( - f'Number of valid intervals:\t{len(interval_dict["valid_times"])}' + + key.update( + { + "raw_object_id": rawdata.object_id, + "interval_list_name": raw_interval_name, + "comments": rawdata.comments, + "description": rawdata.description, + } + ) + + self.insert1( + key, + skip_duplicates=True, + allow_direct_insert=True, ) - key["interval_list_name"] = raw_interval_name - key["comments"] = rawdata.comments - key["description"] = rawdata.description - self.insert1(key, skip_duplicates=True) def nwb_object(self, key): # TODO return the nwb_object; FIX: this should be replaced with a fetch @@ -330,6 +376,12 @@ class SampleCount(SpyglassMixin, dj.Imported): _nwb_table = Nwbfile def make(self, key): + self._no_transaction_make(key) + + def _no_transaction_make(self, key): + """Make without transaction + + Allows populate_all_common to work within a single transaction.""" nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) @@ -343,7 +395,7 @@ def make(self, key): ) return # see #849 key["sample_count_object_id"] = sample_count.object_id - self.insert1(key) + self.insert1(key, 
allow_direct_insert=True) @schema diff --git a/src/spyglass/common/common_nwbfile.py b/src/spyglass/common/common_nwbfile.py index 19700d3b3..d5bba9e51 100644 --- a/src/spyglass/common/common_nwbfile.py +++ b/src/spyglass/common/common_nwbfile.py @@ -65,6 +65,7 @@ def insert_from_relative_file_name(cls, nwb_file_name): The relative path to the NWB file. """ nwb_file_abs_path = Nwbfile.get_abs_path(nwb_file_name, new_file=True) + assert os.path.exists( nwb_file_abs_path ), f"File does not exist: {nwb_file_abs_path}" diff --git a/src/spyglass/common/common_session.py b/src/spyglass/common/common_session.py index acb4a0826..e97934122 100644 --- a/src/spyglass/common/common_session.py +++ b/src/spyglass/common/common_session.py @@ -52,6 +52,12 @@ class Experimenter(SpyglassMixin, dj.Part): """ def make(self, key): + self._no_transaction_make(key) + + def _no_transaction_make(self, key): + """Make without transaction + + Allows populate_all_common to work within a single transaction.""" # These imports must go here to avoid cyclic dependencies # from .common_task import Task, TaskEpoch from .common_interval import IntervalList @@ -114,6 +120,7 @@ def make(self, key): "experiment_description": nwbf.experiment_description, }, skip_duplicates=True, + allow_direct_insert=True, # for populate_all_common ) logger.info("Skipping Apparatus for now...") diff --git a/src/spyglass/common/common_task.py b/src/spyglass/common/common_task.py index 0dffa4ac5..49fd7bb0e 100644 --- a/src/spyglass/common/common_task.py +++ b/src/spyglass/common/common_task.py @@ -97,6 +97,12 @@ class TaskEpoch(SpyglassMixin, dj.Imported): """ def make(self, key): + self._no_transaction_make(key) + + def _no_transaction_make(self, key): + """Make without transaction + + Allows populate_all_common to work within a single transaction.""" nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile().get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) @@ -120,6 +126,7 @@ def make(self, key): logger.warn(f"No tasks processing module found in {nwbf}\n") return + task_inserts = [] for task in tasks_mod.data_interfaces.values(): if self.check_task_table(task): # check if the task is in the Task table and if not, add it @@ -169,7 +176,8 @@ def make(self, key): break # TODO case when interval is not found is not handled key["interval_list_name"] = interval - self.insert1(key) + task_inserts.append(key.copy()) + self.insert(task_inserts, allow_direct_insert=True) @classmethod def update_entries(cls, restrict={}): diff --git a/src/spyglass/common/populate_all_common.py b/src/spyglass/common/populate_all_common.py index 2972ed145..04df52dec 100644 --- a/src/spyglass/common/populate_all_common.py +++ b/src/spyglass/common/populate_all_common.py @@ -1,3 +1,5 @@ +from typing import List, Union + import datajoint as dj from spyglass.common.common_behav import ( @@ -20,54 +22,147 @@ from spyglass.utils import logger -def populate_all_common(nwb_file_name): - """Insert all common tables for a given NWB file.""" +def log_insert_error( + table: str, err: Exception, error_constants: dict = None +) -> None: + """Log a given error to the InsertError table. + + Parameters + ---------- + table : str + The table name where the error occurred. + err : Exception + The exception that was raised. + error_constants : dict, optional + Dictionary with keys for dj_user, connection_id, and nwb_file_name. + Defaults to checking dj.conn and using "Unknown" for nwb_file_name. 
+ """ + if error_constants is None: + error_constants = dict( + dj_user=dj.config["database.user"], + connection_id=dj.conn().connection_id, + nwb_file_name="Unknown", + ) + InsertError.insert1( + dict( + **error_constants, + table=table.__name__, + error_type=type(err).__name__, + error_message=str(err), + error_raw=str(err), + ) + ) + + +def single_transaction_make( + tables: List[dj.Table], + nwb_file_name: str, + raise_err: bool = False, + error_constants: dict = None, +): + """For each table, run the `_no_transaction_make` method. + + Requires `allow_direct_insert` set to True within each method. Uses + nwb_file_name search table key_source for relevant key. Currently assumes + all tables will have exactly one key_source entry per nwb file. + """ + file_restr = {"nwb_file_name": nwb_file_name} + with Nwbfile.connection.transaction: + for table in tables: + logger.info(f"Populating {table.__name__}...") + + # If imported/computed table, get key from key_source + key_source = getattr(table, "key_source", None) + if key_source is None: # Generate key from parents + parents = table.parents(as_objects=True) + key_source = parents[0].proj() + for parent in parents[1:]: + key_source *= parent.proj() + pop_key = (key_source & file_restr).fetch1("KEY") + + try: + table()._no_transaction_make(pop_key) + except Exception as err: + if raise_err: + raise err + log_insert_error( + table=table, err=err, error_constants=error_constants + ) + + +def populate_all_common( + nwb_file_name, rollback_on_fail=False, raise_err=False +) -> Union[List, None]: + """Insert all common tables for a given NWB file. + + Parameters + ---------- + nwb_file_name : str + The name of the NWB file to populate. + rollback_on_fail : bool, optional + If True, will delete the Session entry if any errors occur. + Defaults to False. + raise_err : bool, optional + If True, will raise any errors that occur during population. + Defaults to False. This will prevent any rollback from occurring. + + Returns + ------- + List + A list of keys for InsertError entries if any errors occurred. + """ from spyglass.spikesorting.imported import ImportedSpikeSorting - key = [(Nwbfile & f"nwb_file_name LIKE '{nwb_file_name}'").proj()] - tables = [ - Session, - # NwbfileKachery, # Not used by default - ElectrodeGroup, - Electrode, - Raw, - SampleCount, - DIOEvents, - # SensorData, # Not used by default. Generates large files - RawPosition, - TaskEpoch, - StateScriptFile, - VideoFile, - PositionSource, - RawPosition, - ImportedSpikeSorting, - ] error_constants = dict( dj_user=dj.config["database.user"], connection_id=dj.conn().connection_id, nwb_file_name=nwb_file_name, ) - for table in tables: - logger.info(f"Populating {table.__name__}...") - try: - table.populate(key) - except Exception as e: - InsertError.insert1( - dict( - **error_constants, - table=table.__name__, - error_type=type(e).__name__, - error_message=str(e), - error_raw=str(e), - ) - ) - query = InsertError & error_constants - if query: - err_tables = query.fetch("table") + table_lists = [ + [ # Tables that can be inserted in a single transaction + Session, + ElectrodeGroup, # Depends on Session + Electrode, # Depends on ElectrodeGroup + Raw, # Depends on Session + SampleCount, # Depends on Session + DIOEvents, # Depends on Session + TaskEpoch, # Depends on Session + ImportedSpikeSorting, # Depends on Session + # NwbfileKachery, # Not used by default + # SensorData, # Not used by default. 
Generates large files + ], + [ # Tables that depend on above transaction + PositionSource, # Depends on Session + VideoFile, # Depends on TaskEpoch + StateScriptFile, # Depends on TaskEpoch + ], + [ + RawPosition, # Depends on PositionSource + ], + ] + + for tables in table_lists: + single_transaction_make( + tables=tables, + nwb_file_name=nwb_file_name, + raise_err=raise_err, + error_constants=error_constants, + ) + + err_query = InsertError & error_constants + nwbfile_query = Nwbfile & {"nwb_file_name": nwb_file_name} + + if err_query and nwbfile_query and rollback_on_fail: + logger.error(f"Rolling back population for {nwb_file_name}...") + # Should this be safemode=False to prevent confirmation prompt? + nwbfile_query.super_delete(warn=False) + + if err_query: + err_tables = err_query.fetch("table") logger.error( f"Errors occurred during population for {nwb_file_name}:\n\t" + f"Failed tables {err_tables}\n\t" + "See common_usage.InsertError for more details" ) - return query.fetch("KEY") + return err_query.fetch("KEY") diff --git a/src/spyglass/data_import/insert_sessions.py b/src/spyglass/data_import/insert_sessions.py index 329a7be42..a5d539e8e 100644 --- a/src/spyglass/data_import/insert_sessions.py +++ b/src/spyglass/data_import/insert_sessions.py @@ -12,7 +12,11 @@ from spyglass.utils.nwb_helper_fn import get_nwb_copy_filename -def insert_sessions(nwb_file_names: Union[str, List[str]]): +def insert_sessions( + nwb_file_names: Union[str, List[str]], + rollback_on_fail: bool = False, + raise_err: bool = False, +): """ Populate the dj database with new sessions. @@ -23,6 +27,10 @@ def insert_sessions(nwb_file_names: Union[str, List[str]]): existing .nwb files. Each file represents a session. Also accepts strings with glob wildcards (e.g., *) so long as the wildcard specifies exactly one file. + rollback_on_fail : bool, optional + If True, undo all inserts if an error occurs. Default is False. + raise_err : bool, optional + If True, raise an error if an error occurs. Default is False. """ if not isinstance(nwb_file_names, list): @@ -66,7 +74,11 @@ def insert_sessions(nwb_file_names: Union[str, List[str]]): # the raw data in the original file copy_nwb_link_raw_ephys(nwb_file_name, out_nwb_file_name) Nwbfile().insert_from_relative_file_name(out_nwb_file_name) - populate_all_common(out_nwb_file_name) + return populate_all_common( + out_nwb_file_name, + rollback_on_fail=rollback_on_fail, + raise_err=raise_err, + ) def copy_nwb_link_raw_ephys(nwb_file_name, out_nwb_file_name): diff --git a/src/spyglass/spikesorting/imported.py b/src/spyglass/spikesorting/imported.py index ca1bdc9d0..048502081 100644 --- a/src/spyglass/spikesorting/imported.py +++ b/src/spyglass/spikesorting/imported.py @@ -31,6 +31,13 @@ class Annotations(SpyglassMixin, dj.Part): """ def make(self, key): + self._no_transaction_make(key) + + def _no_transaction_make(self, key): + """Make without transaction + + Allows populate_all_common to work within a single transaction.""" + raise RuntimeError("TEMP: This is a test error. 
Please ignore.") orig_key = copy.deepcopy(key) nwb_file_abs_path = Nwbfile.get_abs_path(key["nwb_file_name"]) @@ -49,7 +56,7 @@ def make(self, key): key["object_id"] = nwbfile.units.object_id - self.insert1(key, skip_duplicates=True) + self.insert1(key, skip_duplicates=True, allow_direct_insert=True) part_name = SpikeSortingOutput._part_name(self.table_name) SpikeSortingOutput._merge_insert( From a6e2ea6414dc1725e1afe733d3c5fe6bf1654c60 Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Fri, 10 May 2024 15:15:57 -0700 Subject: [PATCH 35/60] Permit multiple restrict_by (#969) * Permit multiple restrict_by * Update Changelog * Fix typo --- CHANGELOG.md | 2 +- docs/src/misc/mixin.md | 20 ++++++++++--------- src/spyglass/decoding/v1/waveform_features.py | 2 +- src/spyglass/utils/dj_merge_tables.py | 20 +------------------ src/spyglass/utils/dj_mixin.py | 2 +- 5 files changed, 15 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bf8804795..299a264b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,7 @@ - Create class `SpyglassGroupPart` to aid delete propagations #899 - Fix bug report template #955 - Add rollback option to `populate_all_common` #957 -- Add long-distance restrictions via `<<` and `>>` operators. #943 +- Add long-distance restrictions via `<<` and `>>` operators. #943, #969 - Fix relative pathing for `mkdocstring-python=>1.9.1`. #967, #968 ## [0.5.2] (April 22, 2024) diff --git a/docs/src/misc/mixin.md b/docs/src/misc/mixin.md index 6b3884551..229747402 100644 --- a/docs/src/misc/mixin.md +++ b/docs/src/misc/mixin.md @@ -59,37 +59,39 @@ key and `>>` as a shorthand for `restrict_by` a downstream key. ```python from spyglass.example import AnyTable -AnyTable >> 'downsteam_attribute="value"' -AnyTable << 'upstream_attribute="value"' +AnyTable() << 'upstream_attribute="value"' +AnyTable() >> 'downsteam_attribute="value"' # Equivalent to -AnyTable.restrict_by('upstream_attribute="value"', direction="up") -AnyTable.restrict_by('downsteam_attribute="value"', direction="down") +AnyTable().restrict_by('downsteam_attribute="value"', direction="down") +AnyTable().restrict_by('upstream_attribute="value"', direction="up") ``` Some caveats to this function: 1. 'Peripheral' tables, like `IntervalList` and `AnalysisNwbfile` make it hard to determine the correct parent/child relationship and have been removed - from this search. + from this search by default. 2. This function will raise an error if it attempts to check a table that has not been imported into the current namespace. It is best used for exploring and debugging, not for production code. 3. It's hard to determine the attributes in a mixed dictionary/string restriction. If you are having trouble, try using a pure string restriction. -4. The most direct path to your restriction may not be the path took, especially - when using Merge Tables. When the result is empty see the warning about the - path used. Then, ban tables from the search to force a different path. +4. The most direct path to your restriction may not be the path your data took, + especially when using Merge Tables. When the result is empty see the + warning about the path used. Then, ban tables from the search to force a + different path. 
```python -my_table = MyTable() # must be instantced +my_table = MyTable() # must be instanced my_table.ban_search_table(UnwantedTable1) my_table.ban_search_table([UnwantedTable2, UnwantedTable3]) my_table.unban_search_table(UnwantedTable3) my_table.see_banned_tables() my_table << my_restriction +my_table << upstream_restriction >> downstream_restriction ``` When providing a restriction of the parent, use 'up' direction. When providing a diff --git a/src/spyglass/decoding/v1/waveform_features.py b/src/spyglass/decoding/v1/waveform_features.py index 4a999accd..536ed4864 100644 --- a/src/spyglass/decoding/v1/waveform_features.py +++ b/src/spyglass/decoding/v1/waveform_features.py @@ -82,7 +82,7 @@ def supported_waveform_features(self) -> list[str]: @schema -class UnitWaveformFeaturesSelection(dj.Manual): +class UnitWaveformFeaturesSelection(SpyglassMixin, dj.Manual): definition = """ -> SpikeSortingOutput.proj(spikesorting_merge_id="merge_id") -> WaveformFeaturesParams diff --git a/src/spyglass/utils/dj_merge_tables.py b/src/spyglass/utils/dj_merge_tables.py index 0b8f16de6..ce96fe00e 100644 --- a/src/spyglass/utils/dj_merge_tables.py +++ b/src/spyglass/utils/dj_merge_tables.py @@ -830,25 +830,7 @@ def delete_downstream_merge( ) -> list: """Given a table/restriction, id or delete relevant downstream merge entries - Parameters - ---------- - table: dj.Table - DataJoint table or restriction thereof - restriction: str - Optional restriction to apply before deletion from merge/part - tables. If not provided, delete all downstream entries. - dry_run: bool - Default True. If true, return list of tuples, merge/part tables - downstream of table input. Otherwise, delete merge/part table entries. - disable_warning: bool - Default False. If True, don't warn about restrictions on table object. - kwargs: dict - Additional keyword arguments for DataJoint delete. - - Returns - ------- - List[Tuple[dj.Table, dj.Table]] - Entries in merge/part tables downstream of table input. + Passthrough to SpyglassMixin.delete_downstream_merge """ logger.warning( "DEPRECATED: This function will be removed in `0.6`. " diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index 08fa377b3..cf1471ee6 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -886,7 +886,7 @@ def restrict_by( if return_graph: return graph - ret = graph.leaf_ft[0] + ret = self & graph._get_restr(self.full_table_name) if len(ret) == len(self) or len(ret) == 0: logger.warning( f"Failed to restrict with path: {graph.path_str}\n\t" From 113ce9a25a1bf2f4dfb3c3cce16c80b9251b57d5 Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Mon, 13 May 2024 12:29:43 -0700 Subject: [PATCH 36/60] Allow dlc pipeline to run without prior position tracking (#970) * fix dlc pose estimation populate if no raw position data * allow dlc pipeline to run without raw spatial data * update changelog * string formatting * fix analysis nwb create time --- CHANGELOG.md | 5 ++ .../position/v1/position_dlc_centroid.py | 19 +++++-- .../position/v1/position_dlc_orient.py | 25 ++++++--- .../v1/position_dlc_pose_estimation.py | 53 ++++++++++++------- .../position/v1/position_dlc_position.py | 9 ++-- .../position/v1/position_dlc_selection.py | 8 +-- 6 files changed, 82 insertions(+), 37 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 299a264b7..4b0855fe7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,11 @@ - Add long-distance restrictions via `<<` and `>>` operators. 
#943, #969 - Fix relative pathing for `mkdocstring-python=>1.9.1`. #967, #968 +### Pipelines + +- DLC + - Allow dlc without pre-existing tracking data #950 + ## [0.5.2] (April 22, 2024) ### Infrastructure diff --git a/src/spyglass/position/v1/position_dlc_centroid.py b/src/spyglass/position/v1/position_dlc_centroid.py index e989265da..5b96d19db 100644 --- a/src/spyglass/position/v1/position_dlc_centroid.py +++ b/src/spyglass/position/v1/position_dlc_centroid.py @@ -268,17 +268,26 @@ def make(self, key): ) position = pynwb.behavior.Position() velocity = pynwb.behavior.BehavioralTimeSeries() - spatial_series = (RawPosition() & key).fetch_nwb()[0][ - "raw_position" - ] + if ( + RawPosition & key + ): # if spatial series exists, get metadata from there + spatial_series = (RawPosition() & key).fetch_nwb()[0][ + "raw_position" + ] + reference_frame = spatial_series.reference_frame + comments = spatial_series.comments + else: + reference_frame = "" + comments = "no comments" + METERS_PER_CM = 0.01 position.create_spatial_series( name="position", timestamps=final_df.index.to_numpy(), conversion=METERS_PER_CM, data=final_df.loc[:, idx[("x", "y")]].to_numpy(), - reference_frame=spatial_series.reference_frame, - comments=spatial_series.comments, + reference_frame=reference_frame, + comments=comments, description="x_position, y_position", ) velocity.create_timeseries( diff --git a/src/spyglass/position/v1/position_dlc_orient.py b/src/spyglass/position/v1/position_dlc_orient.py index e1e5c668b..0c873fff4 100644 --- a/src/spyglass/position/v1/position_dlc_orient.py +++ b/src/spyglass/position/v1/position_dlc_orient.py @@ -1,3 +1,5 @@ +from time import time + import datajoint as dj import numpy as np import pandas as pd @@ -85,9 +87,7 @@ class DLCOrientation(SpyglassMixin, dj.Computed): def make(self, key): # Get labels to smooth from Parameters table - key["analysis_file_name"] = AnalysisNwbfile().create( # logged - key["nwb_file_name"] - ) + AnalysisNwbfile()._creation_times["pre_create_time"] = time() cohort_entries = DLCSmoothInterpCohort.BodyPart & key pos_df = pd.concat( { @@ -133,15 +133,28 @@ def make(self, key): final_df = pd.DataFrame( orientation, columns=["orientation"], index=pos_df.index ) - spatial_series = (RawPosition() & key).fetch_nwb()[0]["raw_position"] + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) + if ( + RawPosition & key + ): # if spatial series exists, get metadata from there + spatial_series = (RawPosition() & key).fetch_nwb()[0][ + "raw_position" + ] + reference_frame = spatial_series.reference_frame + comments = spatial_series.comments + else: + reference_frame = "" + comments = "no comments" orientation = pynwb.behavior.CompassDirection() orientation.create_spatial_series( name="orientation", timestamps=final_df.index.to_numpy(), conversion=1.0, data=final_df["orientation"].to_numpy(), - reference_frame=spatial_series.reference_frame, - comments=spatial_series.comments, + reference_frame=reference_frame, + comments=comments, description="orientation", ) nwb_analysis_file = AnalysisNwbfile() diff --git a/src/spyglass/position/v1/position_dlc_pose_estimation.py b/src/spyglass/position/v1/position_dlc_pose_estimation.py index dfc6095a5..bf56fb6fd 100644 --- a/src/spyglass/position/v1/position_dlc_pose_estimation.py +++ b/src/spyglass/position/v1/position_dlc_pose_estimation.py @@ -232,25 +232,38 @@ def make(self, key): dlc_result.creation_time ).strftime("%Y-%m-%d %H:%M:%S") - logger.logger.info("getting raw position") - 
interval_list_name = ( - convert_epoch_interval_name_to_position_interval_name( - { - "nwb_file_name": key["nwb_file_name"], - "epoch": key["epoch"], - }, - populate_missing=False, + # get video information + _, _, meters_per_pixel, video_time = get_video_path(key) + # check if a position interval exists for this epoch + try: + interval_list_name = ( + convert_epoch_interval_name_to_position_interval_name( + { + "nwb_file_name": key["nwb_file_name"], + "epoch": key["epoch"], + }, + populate_missing=False, + ) ) - ) - spatial_series = ( - RawPosition() - & {**key, "interval_list_name": interval_list_name} - ).fetch_nwb()[0]["raw_position"] - _, _, _, video_time = get_video_path(key) - pos_time = spatial_series.timestamps - # TODO: should get timestamps from VideoFile, but need the video_frame_ind from RawPosition, - # which also has timestamps - key["meters_per_pixel"] = spatial_series.conversion + raw_position = True + except KeyError: + raw_position = False + + if raw_position: + logger.logger.info("Getting raw position") + spatial_series = ( + RawPosition() + & {**key, "interval_list_name": interval_list_name} + ).fetch_nwb()[0]["raw_position"] + pos_time = spatial_series.timestamps + reference_frame = spatial_series.reference_frame + comments = spatial_series.comments + else: + pos_time = video_time + reference_frame = "" + comments = "no comments" + + key["meters_per_pixel"] = meters_per_pixel # Insert entry into DLCPoseEstimation logger.logger.info( @@ -292,8 +305,8 @@ def make(self, key): timestamps=part_df.time.to_numpy(), conversion=METERS_PER_CM, data=part_df.loc[:, idx[("x", "y")]].to_numpy(), - reference_frame=spatial_series.reference_frame, - comments=spatial_series.comments, + reference_frame=reference_frame, + comments=comments, description="x_position, y_position", ) likelihood.create_timeseries( diff --git a/src/spyglass/position/v1/position_dlc_position.py b/src/spyglass/position/v1/position_dlc_position.py index 436d890d5..11c7019f3 100644 --- a/src/spyglass/position/v1/position_dlc_position.py +++ b/src/spyglass/position/v1/position_dlc_position.py @@ -1,3 +1,5 @@ +from time import time + import datajoint as dj import numpy as np import pandas as pd @@ -167,9 +169,7 @@ def make(self, key): path=f"{output_dir.as_posix()}/log.log", print_console=False, ) as logger: - key["analysis_file_name"] = AnalysisNwbfile().create( # logged - key["nwb_file_name"] - ) + AnalysisNwbfile()._creation_times["pre_create_time"] = time() logger.logger.info("-----------------------") idx = pd.IndexSlice # Get labels to smooth from Parameters table @@ -227,6 +227,9 @@ def make(self, key): .fetch_nwb()[0]["dlc_pose_estimation_position"] .get_spatial_series() ) + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) # Add dataframe to AnalysisNwbfile nwb_analysis_file = AnalysisNwbfile() position = pynwb.behavior.Position() diff --git a/src/spyglass/position/v1/position_dlc_selection.py b/src/spyglass/position/v1/position_dlc_selection.py index 74354db31..facfb8e25 100644 --- a/src/spyglass/position/v1/position_dlc_selection.py +++ b/src/spyglass/position/v1/position_dlc_selection.py @@ -1,5 +1,6 @@ import copy from pathlib import Path +from time import time import datajoint as dj import numpy as np @@ -58,9 +59,7 @@ class DLCPosV1(SpyglassMixin, dj.Computed): def make(self, key): orig_key = copy.deepcopy(key) # Add to Analysis NWB file - key["analysis_file_name"] = AnalysisNwbfile().create( # logged - key["nwb_file_name"] - ) + 
AnalysisNwbfile()._creation_times["pre_create_time"] = time() key["pose_eval_result"] = self.evaluate_pose_estimation(key) pos_nwb = (DLCCentroid & key).fetch_nwb()[0] @@ -114,6 +113,9 @@ def make(self, key): comments=vid_frame_obj.comments, ) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) nwb_analysis_file = AnalysisNwbfile() key["orientation_object_id"] = nwb_analysis_file.add_nwb_object( key["analysis_file_name"], orientation From 9d8b19a7b458b9fff0f8bfb06133315e8403b91f Mon Sep 17 00:00:00 2001 From: Eric Denovellis Date: Mon, 13 May 2024 12:33:13 -0700 Subject: [PATCH 37/60] Revert "Allow dlc pipeline to run without prior position tracking (#970)" (#972) This reverts commit 113ce9a25a1bf2f4dfb3c3cce16c80b9251b57d5. Co-authored-by: Chris Brozdowski --- CHANGELOG.md | 5 -- .../position/v1/position_dlc_centroid.py | 19 ++----- .../position/v1/position_dlc_orient.py | 25 +++------ .../v1/position_dlc_pose_estimation.py | 53 +++++++------------ .../position/v1/position_dlc_position.py | 9 ++-- .../position/v1/position_dlc_selection.py | 8 ++- 6 files changed, 37 insertions(+), 82 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b0855fe7..299a264b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,11 +14,6 @@ - Add long-distance restrictions via `<<` and `>>` operators. #943, #969 - Fix relative pathing for `mkdocstring-python=>1.9.1`. #967, #968 -### Pipelines - -- DLC - - Allow dlc without pre-existing tracking data #950 - ## [0.5.2] (April 22, 2024) ### Infrastructure diff --git a/src/spyglass/position/v1/position_dlc_centroid.py b/src/spyglass/position/v1/position_dlc_centroid.py index 5b96d19db..e989265da 100644 --- a/src/spyglass/position/v1/position_dlc_centroid.py +++ b/src/spyglass/position/v1/position_dlc_centroid.py @@ -268,26 +268,17 @@ def make(self, key): ) position = pynwb.behavior.Position() velocity = pynwb.behavior.BehavioralTimeSeries() - if ( - RawPosition & key - ): # if spatial series exists, get metadata from there - spatial_series = (RawPosition() & key).fetch_nwb()[0][ - "raw_position" - ] - reference_frame = spatial_series.reference_frame - comments = spatial_series.comments - else: - reference_frame = "" - comments = "no comments" - + spatial_series = (RawPosition() & key).fetch_nwb()[0][ + "raw_position" + ] METERS_PER_CM = 0.01 position.create_spatial_series( name="position", timestamps=final_df.index.to_numpy(), conversion=METERS_PER_CM, data=final_df.loc[:, idx[("x", "y")]].to_numpy(), - reference_frame=reference_frame, - comments=comments, + reference_frame=spatial_series.reference_frame, + comments=spatial_series.comments, description="x_position, y_position", ) velocity.create_timeseries( diff --git a/src/spyglass/position/v1/position_dlc_orient.py b/src/spyglass/position/v1/position_dlc_orient.py index 0c873fff4..e1e5c668b 100644 --- a/src/spyglass/position/v1/position_dlc_orient.py +++ b/src/spyglass/position/v1/position_dlc_orient.py @@ -1,5 +1,3 @@ -from time import time - import datajoint as dj import numpy as np import pandas as pd @@ -87,7 +85,9 @@ class DLCOrientation(SpyglassMixin, dj.Computed): def make(self, key): # Get labels to smooth from Parameters table - AnalysisNwbfile()._creation_times["pre_create_time"] = time() + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) cohort_entries = DLCSmoothInterpCohort.BodyPart & key pos_df = pd.concat( { @@ -133,28 +133,15 @@ def make(self, key): final_df = pd.DataFrame( orientation, columns=["orientation"], 
index=pos_df.index ) - key["analysis_file_name"] = AnalysisNwbfile().create( # logged - key["nwb_file_name"] - ) - if ( - RawPosition & key - ): # if spatial series exists, get metadata from there - spatial_series = (RawPosition() & key).fetch_nwb()[0][ - "raw_position" - ] - reference_frame = spatial_series.reference_frame - comments = spatial_series.comments - else: - reference_frame = "" - comments = "no comments" + spatial_series = (RawPosition() & key).fetch_nwb()[0]["raw_position"] orientation = pynwb.behavior.CompassDirection() orientation.create_spatial_series( name="orientation", timestamps=final_df.index.to_numpy(), conversion=1.0, data=final_df["orientation"].to_numpy(), - reference_frame=reference_frame, - comments=comments, + reference_frame=spatial_series.reference_frame, + comments=spatial_series.comments, description="orientation", ) nwb_analysis_file = AnalysisNwbfile() diff --git a/src/spyglass/position/v1/position_dlc_pose_estimation.py b/src/spyglass/position/v1/position_dlc_pose_estimation.py index bf56fb6fd..dfc6095a5 100644 --- a/src/spyglass/position/v1/position_dlc_pose_estimation.py +++ b/src/spyglass/position/v1/position_dlc_pose_estimation.py @@ -232,38 +232,25 @@ def make(self, key): dlc_result.creation_time ).strftime("%Y-%m-%d %H:%M:%S") - # get video information - _, _, meters_per_pixel, video_time = get_video_path(key) - # check if a position interval exists for this epoch - try: - interval_list_name = ( - convert_epoch_interval_name_to_position_interval_name( - { - "nwb_file_name": key["nwb_file_name"], - "epoch": key["epoch"], - }, - populate_missing=False, - ) + logger.logger.info("getting raw position") + interval_list_name = ( + convert_epoch_interval_name_to_position_interval_name( + { + "nwb_file_name": key["nwb_file_name"], + "epoch": key["epoch"], + }, + populate_missing=False, ) - raw_position = True - except KeyError: - raw_position = False - - if raw_position: - logger.logger.info("Getting raw position") - spatial_series = ( - RawPosition() - & {**key, "interval_list_name": interval_list_name} - ).fetch_nwb()[0]["raw_position"] - pos_time = spatial_series.timestamps - reference_frame = spatial_series.reference_frame - comments = spatial_series.comments - else: - pos_time = video_time - reference_frame = "" - comments = "no comments" - - key["meters_per_pixel"] = meters_per_pixel + ) + spatial_series = ( + RawPosition() + & {**key, "interval_list_name": interval_list_name} + ).fetch_nwb()[0]["raw_position"] + _, _, _, video_time = get_video_path(key) + pos_time = spatial_series.timestamps + # TODO: should get timestamps from VideoFile, but need the video_frame_ind from RawPosition, + # which also has timestamps + key["meters_per_pixel"] = spatial_series.conversion # Insert entry into DLCPoseEstimation logger.logger.info( @@ -305,8 +292,8 @@ def make(self, key): timestamps=part_df.time.to_numpy(), conversion=METERS_PER_CM, data=part_df.loc[:, idx[("x", "y")]].to_numpy(), - reference_frame=reference_frame, - comments=comments, + reference_frame=spatial_series.reference_frame, + comments=spatial_series.comments, description="x_position, y_position", ) likelihood.create_timeseries( diff --git a/src/spyglass/position/v1/position_dlc_position.py b/src/spyglass/position/v1/position_dlc_position.py index 11c7019f3..436d890d5 100644 --- a/src/spyglass/position/v1/position_dlc_position.py +++ b/src/spyglass/position/v1/position_dlc_position.py @@ -1,5 +1,3 @@ -from time import time - import datajoint as dj import numpy as np import pandas as pd @@ -169,7 
+167,9 @@ def make(self, key): path=f"{output_dir.as_posix()}/log.log", print_console=False, ) as logger: - AnalysisNwbfile()._creation_times["pre_create_time"] = time() + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) logger.logger.info("-----------------------") idx = pd.IndexSlice # Get labels to smooth from Parameters table @@ -227,9 +227,6 @@ def make(self, key): .fetch_nwb()[0]["dlc_pose_estimation_position"] .get_spatial_series() ) - key["analysis_file_name"] = AnalysisNwbfile().create( # logged - key["nwb_file_name"] - ) # Add dataframe to AnalysisNwbfile nwb_analysis_file = AnalysisNwbfile() position = pynwb.behavior.Position() diff --git a/src/spyglass/position/v1/position_dlc_selection.py b/src/spyglass/position/v1/position_dlc_selection.py index facfb8e25..74354db31 100644 --- a/src/spyglass/position/v1/position_dlc_selection.py +++ b/src/spyglass/position/v1/position_dlc_selection.py @@ -1,6 +1,5 @@ import copy from pathlib import Path -from time import time import datajoint as dj import numpy as np @@ -59,7 +58,9 @@ class DLCPosV1(SpyglassMixin, dj.Computed): def make(self, key): orig_key = copy.deepcopy(key) # Add to Analysis NWB file - AnalysisNwbfile()._creation_times["pre_create_time"] = time() + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) key["pose_eval_result"] = self.evaluate_pose_estimation(key) pos_nwb = (DLCCentroid & key).fetch_nwb()[0] @@ -113,9 +114,6 @@ def make(self, key): comments=vid_frame_obj.comments, ) - key["analysis_file_name"] = AnalysisNwbfile().create( - key["nwb_file_name"] - ) nwb_analysis_file = AnalysisNwbfile() key["orientation_object_id"] = nwb_analysis_file.add_nwb_object( key["analysis_file_name"], orientation From 97373ea6f0b6e67f016068fc71025f6c53bee919 Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Mon, 13 May 2024 14:42:32 -0500 Subject: [PATCH 38/60] Fix test fails related to #957 (#971) * Address failing tests * Revert to bare make for no transaction * Update changelog --- CHANGELOG.md | 2 +- src/spyglass/common/common_behav.py | 43 +++++++++++----------- src/spyglass/common/common_dio.py | 3 -- src/spyglass/common/common_ephys.py | 16 +------- src/spyglass/common/common_session.py | 3 -- src/spyglass/common/common_task.py | 6 --- src/spyglass/common/populate_all_common.py | 22 +++++------ src/spyglass/spikesorting/imported.py | 4 -- src/spyglass/utils/dj_merge_tables.py | 10 ----- src/spyglass/utils/dj_mixin.py | 10 +++++ tests/common/test_behav.py | 14 +++---- tests/conftest.py | 2 +- tests/position/test_trodes.py | 6 --- 13 files changed, 54 insertions(+), 87 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 299a264b7..103240dca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ - Create class `SpyglassGroupPart` to aid delete propagations #899 - Fix bug report template #955 -- Add rollback option to `populate_all_common` #957 +- Add rollback option to `populate_all_common` #957, #971 - Add long-distance restrictions via `<<` and `>>` operators. #943, #969 - Fix relative pathing for `mkdocstring-python=>1.9.1`. 
#967, #968 diff --git a/src/spyglass/common/common_behav.py b/src/spyglass/common/common_behav.py index b7e8d953b..b206ed61f 100644 --- a/src/spyglass/common/common_behav.py +++ b/src/spyglass/common/common_behav.py @@ -1,7 +1,7 @@ import pathlib import re from functools import reduce -from typing import Dict +from typing import Dict, List, Union import datajoint as dj import ndx_franklab_novela @@ -43,7 +43,14 @@ class SpatialSeries(SpyglassMixin, dj.Part): name=null: varchar(32) # name of spatial series """ - def _no_transaction_make(self, keys=None): + def populate(self, *args, **kwargs): + logger.warning( + "PositionSource is a manual table with a custom `make`." + + " Use `make` instead." + ) + self.make(*args, **kwargs) + + def make(self, keys: Union[List[Dict], dj.Table]): """Insert position source data from NWB file.""" if not isinstance(keys, list): keys = [keys] @@ -52,10 +59,7 @@ def _no_transaction_make(self, keys=None): for key in keys: nwb_file_name = key.get("nwb_file_name") if not nwb_file_name: - raise ValueError( - "PositionSource.populate is an alias for a non-computed table " - + "and must be passed a key with nwb_file_name" - ) + raise ValueError("PositionSource.make requires nwb_file_name") self.insert_from_nwbfile(nwb_file_name, skip_duplicates=True) @classmethod @@ -106,7 +110,7 @@ def insert_from_nwbfile(cls, nwb_file_name, skip_duplicates=False) -> None: ) ) - with cls.connection.transaction: + with cls._safe_context(): IntervalList.insert(intervals, skip_duplicates=skip_duplicates) cls.insert(sources, skip_duplicates=skip_duplicates) cls.SpatialSeries.insert( @@ -223,9 +227,6 @@ def _get_column_names(rp, pos_id): return column_names def make(self, key): - self._no_transaction_make(key) - - def _no_transaction_make(self, key): """Make without transaction Allows populate_all_common to work within a single transaction.""" @@ -296,9 +297,6 @@ class StateScriptFile(SpyglassMixin, dj.Imported): _nwb_table = Nwbfile def make(self, key): - self._no_transaction_make(key) - - def _no_transaction_make(self, key): """Make without transaction Allows populate_all_common to work within a single transaction.""" @@ -433,7 +431,9 @@ def _no_transaction_make(self, key, verbose=True): + "in CameraDevice table." 
) key["video_file_object_id"] = video_obj.object_id - self.insert1(key) + self.insert1( + key, skip_duplicates=True, allow_direct_insert=True + ) is_found = True if not is_found and verbose: @@ -567,7 +567,7 @@ def _no_transaction_make(self, key): # Insert into table key["position_interval_name"] = matching_pos_intervals[0] - self.insert1(key, allow_direct_insert=True) + self.insert1(key, skip_duplicates=True, allow_direct_insert=True) logger.info( "Populated PosIntervalMap for " + f'{nwb_file_name}, {key["interval_list_name"]}' @@ -660,9 +660,10 @@ def populate_position_interval_map_session(nwb_file_name: str): for interval_name in (TaskEpoch & {"nwb_file_name": nwb_file_name}).fetch( "interval_list_name" ): - PositionIntervalMap.populate( - { - "nwb_file_name": nwb_file_name, - "interval_list_name": interval_name, - } - ) + with PositionIntervalMap._safe_context(): + PositionIntervalMap().make( + { + "nwb_file_name": nwb_file_name, + "interval_list_name": interval_name, + } + ) diff --git a/src/spyglass/common/common_dio.py b/src/spyglass/common/common_dio.py index 629adef47..228e9caf9 100644 --- a/src/spyglass/common/common_dio.py +++ b/src/spyglass/common/common_dio.py @@ -27,9 +27,6 @@ class DIOEvents(SpyglassMixin, dj.Imported): _nwb_table = Nwbfile def make(self, key): - self._no_transaction_make(key) - - def _no_transaction_make(self, key): """Make without transaction Allows populate_all_common to work within a single transaction.""" diff --git a/src/spyglass/common/common_ephys.py b/src/spyglass/common/common_ephys.py index d03f6edff..4cddc099d 100644 --- a/src/spyglass/common/common_ephys.py +++ b/src/spyglass/common/common_ephys.py @@ -45,9 +45,6 @@ class ElectrodeGroup(SpyglassMixin, dj.Imported): """ def make(self, key): - self._no_transaction_make(key) - - def _no_transaction_make(self, key): """Make without transaction Allows populate_all_common to work within a single transaction.""" @@ -101,9 +98,6 @@ class Electrode(SpyglassMixin, dj.Imported): """ def make(self, key): - self._no_transaction_make(key) - - def _no_transaction_make(self, key): """Make without transaction Allows populate_all_common to work within a single transaction.""" @@ -190,8 +184,8 @@ def _no_transaction_make(self, key): key.update(electrode_config_dicts[elect_id]) electrode_inserts.append(key.copy()) - self.insert1( - key, + self.insert( + electrode_inserts, skip_duplicates=True, allow_direct_insert=True, # for no_transaction, pop_all_common ) @@ -276,9 +270,6 @@ class Raw(SpyglassMixin, dj.Imported): _nwb_table = Nwbfile def make(self, key): - self._no_transaction_make(key) - - def _no_transaction_make(self, key): """Make without transaction Allows populate_all_common to work within a single transaction.""" @@ -376,9 +367,6 @@ class SampleCount(SpyglassMixin, dj.Imported): _nwb_table = Nwbfile def make(self, key): - self._no_transaction_make(key) - - def _no_transaction_make(self, key): """Make without transaction Allows populate_all_common to work within a single transaction.""" diff --git a/src/spyglass/common/common_session.py b/src/spyglass/common/common_session.py index e97934122..b8139939a 100644 --- a/src/spyglass/common/common_session.py +++ b/src/spyglass/common/common_session.py @@ -52,9 +52,6 @@ class Experimenter(SpyglassMixin, dj.Part): """ def make(self, key): - self._no_transaction_make(key) - - def _no_transaction_make(self, key): """Make without transaction Allows populate_all_common to work within a single transaction.""" diff --git a/src/spyglass/common/common_task.py 
b/src/spyglass/common/common_task.py index 49fd7bb0e..d63901ec2 100644 --- a/src/spyglass/common/common_task.py +++ b/src/spyglass/common/common_task.py @@ -97,12 +97,6 @@ class TaskEpoch(SpyglassMixin, dj.Imported): """ def make(self, key): - self._no_transaction_make(key) - - def _no_transaction_make(self, key): - """Make without transaction - - Allows populate_all_common to work within a single transaction.""" nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile().get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) diff --git a/src/spyglass/common/populate_all_common.py b/src/spyglass/common/populate_all_common.py index 04df52dec..e78b68de1 100644 --- a/src/spyglass/common/populate_all_common.py +++ b/src/spyglass/common/populate_all_common.py @@ -60,7 +60,7 @@ def single_transaction_make( raise_err: bool = False, error_constants: dict = None, ): - """For each table, run the `_no_transaction_make` method. + """For each table, run the `make` method directly instead of `populate`. Requires `allow_direct_insert` set to True within each method. Uses nwb_file_name search table key_source for relevant key. Currently assumes @@ -78,16 +78,16 @@ def single_transaction_make( key_source = parents[0].proj() for parent in parents[1:]: key_source *= parent.proj() - pop_key = (key_source & file_restr).fetch1("KEY") - try: - table()._no_transaction_make(pop_key) - except Exception as err: - if raise_err: - raise err - log_insert_error( - table=table, err=err, error_constants=error_constants - ) + for pop_key in (key_source & file_restr).fetch("KEY"): + try: + table().make(pop_key) + except Exception as err: + if raise_err: + raise err + log_insert_error( + table=table, err=err, error_constants=error_constants + ) def populate_all_common( @@ -123,7 +123,6 @@ def populate_all_common( [ # Tables that can be inserted in a single transaction Session, ElectrodeGroup, # Depends on Session - Electrode, # Depends on ElectrodeGroup Raw, # Depends on Session SampleCount, # Depends on Session DIOEvents, # Depends on Session @@ -133,6 +132,7 @@ def populate_all_common( # SensorData, # Not used by default. Generates large files ], [ # Tables that depend on above transaction + Electrode, # Depends on ElectrodeGroup PositionSource, # Depends on Session VideoFile, # Depends on TaskEpoch StateScriptFile, # Depends on TaskEpoch diff --git a/src/spyglass/spikesorting/imported.py b/src/spyglass/spikesorting/imported.py index 048502081..7e518d6d8 100644 --- a/src/spyglass/spikesorting/imported.py +++ b/src/spyglass/spikesorting/imported.py @@ -31,13 +31,9 @@ class Annotations(SpyglassMixin, dj.Part): """ def make(self, key): - self._no_transaction_make(key) - - def _no_transaction_make(self, key): """Make without transaction Allows populate_all_common to work within a single transaction.""" - raise RuntimeError("TEMP: This is a test error. 
Please ignore.") orig_key = copy.deepcopy(key) nwb_file_abs_path = Nwbfile.get_abs_path(key["nwb_file_name"]) diff --git a/src/spyglass/utils/dj_merge_tables.py b/src/spyglass/utils/dj_merge_tables.py index ce96fe00e..37a51b674 100644 --- a/src/spyglass/utils/dj_merge_tables.py +++ b/src/spyglass/utils/dj_merge_tables.py @@ -1,4 +1,3 @@ -from contextlib import nullcontext from inspect import getmodule from itertools import chain as iter_chain from pprint import pprint @@ -369,15 +368,6 @@ def _merge_insert(cls, rows: list, part_name: str = None, **kwargs) -> None: for part, part_entries in parts_entries.items(): part.insert(part_entries, **kwargs) - @classmethod - def _safe_context(cls): - """Return transaction if not already in one.""" - return ( - cls.connection.transaction - if not cls.connection.in_transaction - else nullcontext() - ) - @classmethod def _ensure_dependencies_loaded(cls) -> None: """Ensure connection dependencies loaded. diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index cf1471ee6..be6063d04 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -1,6 +1,7 @@ from atexit import register as exit_register from atexit import unregister as exit_unregister from collections import OrderedDict +from contextlib import nullcontext from functools import cached_property from inspect import stack as inspect_stack from os import environ @@ -121,6 +122,15 @@ def file_like(self, name=None, **kwargs): return return self & f"{attr} LIKE '%{name}%'" + @classmethod + def _safe_context(cls): + """Return transaction if not already in one.""" + return ( + cls.connection.transaction + if not cls.connection.in_transaction + else nullcontext() + ) + # ------------------------------- fetch_nwb ------------------------------- @cached_property diff --git a/tests/common/test_behav.py b/tests/common/test_behav.py index 28c205442..1f4767dfb 100644 --- a/tests/common/test_behav.py +++ b/tests/common/test_behav.py @@ -22,15 +22,15 @@ def test_valid_epoch_num(common): assert epoch_num == 1, "PositionSource get_epoch_num failed" -def test_invalid_populate(common): - """Test invalid populate""" - with pytest.raises(ValueError): - common.PositionSource.populate(dict()) +def test_possource_make(common): + """Test custom populate""" + common.PositionSource().make(common.Session()) -def test_custom_populate(common): - """Test custom populate""" - common.PositionSource.populate(common.Session()) +def test_possource_make_invalid(common): + """Test invalid populate""" + with pytest.raises(ValueError): + common.PositionSource().make(dict()) def test_raw_position_fetchnwb(common, mini_pos, mini_pos_interval_dict): diff --git a/tests/conftest.py b/tests/conftest.py index 7950854d6..cd9350ff1 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -316,7 +316,7 @@ def mini_insert( if len(Nwbfile()) != 0: dj_logger.warning("Skipping insert, use existing data.") else: - insert_sessions(mini_path.name) + insert_sessions(mini_path.name, raise_err=True) if len(Session()) == 0: raise ValueError("No sessions inserted.") diff --git a/tests/position/test_trodes.py b/tests/position/test_trodes.py index 81a515cf1..d4bc617f6 100644 --- a/tests/position/test_trodes.py +++ b/tests/position/test_trodes.py @@ -59,9 +59,3 @@ def test_fetch_df(trodes_pos_v1, trodes_params): ) hash_exp = "5296e74dea2e5e68d39f81bc81723a12" assert hash_df == hash_exp, "Dataframe differs from expected" - - -def test_null_video(sgp): - """Note: This will change if video is added to the test 
data.""" - with pytest.raises(FileNotFoundError): - sgp.v1.TrodesPosVideo().populate() From 00bd5d8f8d2896b951cdd0e6c51f85e59ef4e474 Mon Sep 17 00:00:00 2001 From: Eric Denovellis Date: Mon, 13 May 2024 14:13:28 -0700 Subject: [PATCH 39/60] Allow dlc pipeline to run without prior position tracking (#973) * fix dlc pose estimation populate if no raw position data * allow dlc pipeline to run without raw spatial data * update changelog * string formatting * fix analysis nwb create time * review changes * allow empty returns from convert_epoch_interval_name_ without error * switch to getattr --------- Co-authored-by: Sam Bray Co-authored-by: Chris Brozdowski --- CHANGELOG.md | 5 +++ src/spyglass/common/common_behav.py | 6 +--- .../position/v1/position_dlc_centroid.py | 14 ++++---- .../position/v1/position_dlc_orient.py | 19 ++++++---- .../v1/position_dlc_pose_estimation.py | 36 +++++++++++-------- .../position/v1/position_dlc_position.py | 9 +++-- .../position/v1/position_dlc_selection.py | 8 +++-- 7 files changed, 59 insertions(+), 38 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 103240dca..096ec4fe9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,11 @@ - Add long-distance restrictions via `<<` and `>>` operators. #943, #969 - Fix relative pathing for `mkdocstring-python=>1.9.1`. #967, #968 +### Pipelines + +- DLC + - Allow dlc without pre-existing tracking data #973 + ## [0.5.2] (April 22, 2024) ### Infrastructure diff --git a/src/spyglass/common/common_behav.py b/src/spyglass/common/common_behav.py index b206ed61f..67e6e35d9 100644 --- a/src/spyglass/common/common_behav.py +++ b/src/spyglass/common/common_behav.py @@ -613,11 +613,7 @@ def convert_epoch_interval_name_to_position_interval_name( if len(pos_query) == 0: if populate_missing: PositionIntervalMap()._no_transaction_make(key) - else: - raise KeyError( - f"{key} must be populated in the PositionIntervalMap table " - + "prior to your current populate call" - ) + pos_query = PositionIntervalMap & key if len(pos_query) == 0: logger.info(f"No position intervals found for {key}") diff --git a/src/spyglass/position/v1/position_dlc_centroid.py b/src/spyglass/position/v1/position_dlc_centroid.py index e989265da..f1f077d6a 100644 --- a/src/spyglass/position/v1/position_dlc_centroid.py +++ b/src/spyglass/position/v1/position_dlc_centroid.py @@ -268,17 +268,19 @@ def make(self, key): ) position = pynwb.behavior.Position() velocity = pynwb.behavior.BehavioralTimeSeries() - spatial_series = (RawPosition() & key).fetch_nwb()[0][ - "raw_position" - ] + if query := (RawPosition & key): + spatial_series = query.fetch_nwb()[0]["raw_position"] + else: + spatial_series = None + METERS_PER_CM = 0.01 position.create_spatial_series( name="position", timestamps=final_df.index.to_numpy(), conversion=METERS_PER_CM, data=final_df.loc[:, idx[("x", "y")]].to_numpy(), - reference_frame=spatial_series.reference_frame, - comments=spatial_series.comments, + reference_frame=getattr(spatial_series, "reference_frame", ""), + comments=getattr(spatial_series, "comments", "no comments"), description="x_position, y_position", ) velocity.create_timeseries( @@ -289,7 +291,7 @@ def make(self, key): data=velocity_df.loc[ :, idx[("velocity_x", "velocity_y", "speed")] ].to_numpy(), - comments=spatial_series.comments, + comments=getattr(spatial_series, "comments", "no comments"), description="x_velocity, y_velocity, speed", ) velocity.create_timeseries( diff --git a/src/spyglass/position/v1/position_dlc_orient.py 
b/src/spyglass/position/v1/position_dlc_orient.py index e1e5c668b..f64802a59 100644 --- a/src/spyglass/position/v1/position_dlc_orient.py +++ b/src/spyglass/position/v1/position_dlc_orient.py @@ -1,3 +1,5 @@ +from time import time + import datajoint as dj import numpy as np import pandas as pd @@ -85,9 +87,7 @@ class DLCOrientation(SpyglassMixin, dj.Computed): def make(self, key): # Get labels to smooth from Parameters table - key["analysis_file_name"] = AnalysisNwbfile().create( # logged - key["nwb_file_name"] - ) + AnalysisNwbfile()._creation_times["pre_create_time"] = time() cohort_entries = DLCSmoothInterpCohort.BodyPart & key pos_df = pd.concat( { @@ -133,15 +133,22 @@ def make(self, key): final_df = pd.DataFrame( orientation, columns=["orientation"], index=pos_df.index ) - spatial_series = (RawPosition() & key).fetch_nwb()[0]["raw_position"] + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) + # if spatial series exists, get metadata from there + if query := (RawPosition & key): + spatial_series = query.fetch_nwb()[0]["raw_position"] + else: + spatial_series = None orientation = pynwb.behavior.CompassDirection() orientation.create_spatial_series( name="orientation", timestamps=final_df.index.to_numpy(), conversion=1.0, data=final_df["orientation"].to_numpy(), - reference_frame=spatial_series.reference_frame, - comments=spatial_series.comments, + reference_frame=getattr(spatial_series, "reference_frame", ""), + comments=getattr(spatial_series, "comments", "no comments"), description="orientation", ) nwb_analysis_file = AnalysisNwbfile() diff --git a/src/spyglass/position/v1/position_dlc_pose_estimation.py b/src/spyglass/position/v1/position_dlc_pose_estimation.py index dfc6095a5..35d21345c 100644 --- a/src/spyglass/position/v1/position_dlc_pose_estimation.py +++ b/src/spyglass/position/v1/position_dlc_pose_estimation.py @@ -232,8 +232,10 @@ def make(self, key): dlc_result.creation_time ).strftime("%Y-%m-%d %H:%M:%S") - logger.logger.info("getting raw position") - interval_list_name = ( + # get video information + _, _, meters_per_pixel, video_time = get_video_path(key) + # check if a position interval exists for this epoch + if interval_list_name := ( convert_epoch_interval_name_to_position_interval_name( { "nwb_file_name": key["nwb_file_name"], @@ -241,16 +243,16 @@ def make(self, key): }, populate_missing=False, ) - ) - spatial_series = ( - RawPosition() - & {**key, "interval_list_name": interval_list_name} - ).fetch_nwb()[0]["raw_position"] - _, _, _, video_time = get_video_path(key) - pos_time = spatial_series.timestamps - # TODO: should get timestamps from VideoFile, but need the video_frame_ind from RawPosition, - # which also has timestamps - key["meters_per_pixel"] = spatial_series.conversion + ): + logger.logger.info("Getting raw position") + spatial_series = ( + RawPosition() + & {**key, "interval_list_name": interval_list_name} + ).fetch_nwb()[0]["raw_position"] + else: + spatial_series = None + + key["meters_per_pixel"] = meters_per_pixel # Insert entry into DLCPoseEstimation logger.logger.info( @@ -282,7 +284,9 @@ def make(self, key): part_df = convert_to_cm(part_df, meters_per_pixel) logger.logger.info("adding timestamps to DataFrame") part_df = add_timestamps( - part_df, pos_time=pos_time, video_time=video_time + part_df, + pos_time=getattr(spatial_series, "timestamps", video_time), + video_time=video_time, ) key["bodypart"] = body_part position = pynwb.behavior.Position() @@ -292,8 +296,10 @@ def make(self, key): 
timestamps=part_df.time.to_numpy(), conversion=METERS_PER_CM, data=part_df.loc[:, idx[("x", "y")]].to_numpy(), - reference_frame=spatial_series.reference_frame, - comments=spatial_series.comments, + reference_frame=get_video_path( + spatial_series, "reference_frame", "" + ), + comments=getattr(spatial_series, "comments", "no commwnts"), description="x_position, y_position", ) likelihood.create_timeseries( diff --git a/src/spyglass/position/v1/position_dlc_position.py b/src/spyglass/position/v1/position_dlc_position.py index 436d890d5..11c7019f3 100644 --- a/src/spyglass/position/v1/position_dlc_position.py +++ b/src/spyglass/position/v1/position_dlc_position.py @@ -1,3 +1,5 @@ +from time import time + import datajoint as dj import numpy as np import pandas as pd @@ -167,9 +169,7 @@ def make(self, key): path=f"{output_dir.as_posix()}/log.log", print_console=False, ) as logger: - key["analysis_file_name"] = AnalysisNwbfile().create( # logged - key["nwb_file_name"] - ) + AnalysisNwbfile()._creation_times["pre_create_time"] = time() logger.logger.info("-----------------------") idx = pd.IndexSlice # Get labels to smooth from Parameters table @@ -227,6 +227,9 @@ def make(self, key): .fetch_nwb()[0]["dlc_pose_estimation_position"] .get_spatial_series() ) + key["analysis_file_name"] = AnalysisNwbfile().create( # logged + key["nwb_file_name"] + ) # Add dataframe to AnalysisNwbfile nwb_analysis_file = AnalysisNwbfile() position = pynwb.behavior.Position() diff --git a/src/spyglass/position/v1/position_dlc_selection.py b/src/spyglass/position/v1/position_dlc_selection.py index 74354db31..facfb8e25 100644 --- a/src/spyglass/position/v1/position_dlc_selection.py +++ b/src/spyglass/position/v1/position_dlc_selection.py @@ -1,5 +1,6 @@ import copy from pathlib import Path +from time import time import datajoint as dj import numpy as np @@ -58,9 +59,7 @@ class DLCPosV1(SpyglassMixin, dj.Computed): def make(self, key): orig_key = copy.deepcopy(key) # Add to Analysis NWB file - key["analysis_file_name"] = AnalysisNwbfile().create( # logged - key["nwb_file_name"] - ) + AnalysisNwbfile()._creation_times["pre_create_time"] = time() key["pose_eval_result"] = self.evaluate_pose_estimation(key) pos_nwb = (DLCCentroid & key).fetch_nwb()[0] @@ -114,6 +113,9 @@ def make(self, key): comments=vid_frame_obj.comments, ) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) nwb_analysis_file = AnalysisNwbfile() key["orientation_object_id"] = nwb_analysis_file.add_nwb_object( key["analysis_file_name"], orientation From 88720432f04ef9d05e1bca8a927705208cd6f0de Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Wed, 15 May 2024 12:06:37 -0700 Subject: [PATCH 40/60] Cleanup of dlc video (#975) * wrong function call * get epoch directly from table key * default to opencv for speed * update changelog --- CHANGELOG.md | 2 +- .../v1/position_dlc_pose_estimation.py | 2 +- .../position/v1/position_dlc_selection.py | 21 ++----------------- 3 files changed, 4 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 096ec4fe9..6b4a2a5b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,7 +17,7 @@ ### Pipelines - DLC - - Allow dlc without pre-existing tracking data #973 + - Allow dlc without pre-existing tracking data #973, #975 ## [0.5.2] (April 22, 2024) diff --git a/src/spyglass/position/v1/position_dlc_pose_estimation.py b/src/spyglass/position/v1/position_dlc_pose_estimation.py index 35d21345c..6a670fc31 100644 --- a/src/spyglass/position/v1/position_dlc_pose_estimation.py +++ 
b/src/spyglass/position/v1/position_dlc_pose_estimation.py @@ -296,7 +296,7 @@ def make(self, key): timestamps=part_df.time.to_numpy(), conversion=METERS_PER_CM, data=part_df.loc[:, idx[("x", "y")]].to_numpy(), - reference_frame=get_video_path( + reference_frame=getattr( spatial_series, "reference_frame", "" ), comments=getattr(spatial_series, "comments", "no commwnts"), diff --git a/src/spyglass/position/v1/position_dlc_selection.py b/src/spyglass/position/v1/position_dlc_selection.py index facfb8e25..b140111e1 100644 --- a/src/spyglass/position/v1/position_dlc_selection.py +++ b/src/spyglass/position/v1/position_dlc_selection.py @@ -311,24 +311,7 @@ def make(self, key): if "video_params" not in params: params["video_params"] = {} M_TO_CM = 100 - interval_list_name = ( - convert_epoch_interval_name_to_position_interval_name( - { - "nwb_file_name": key["nwb_file_name"], - "epoch": key["epoch"], - }, - populate_missing=False, - ) - ) - key["interval_list_name"] = interval_list_name - epoch = ( - int( - key["interval_list_name"] - .replace("pos ", "") - .replace(" valid times", "") - ) - + 1 - ) + epoch = key["epoch"] pose_estimation_key = { "nwb_file_name": key["nwb_file_name"], "epoch": epoch, @@ -440,7 +423,7 @@ def make(self, key): likelihoods=likelihoods, position_time=position_time, video_time=None, - processor=params.get("processor", "matplotlib"), + processor=params.get("processor", "opencv"), frames=frames_arr, percent_frames=percent_frames, output_video_filename=output_video_filename, From 26e74a40dfb263ce011d06d41cb2b278ed8caa11 Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Mon, 20 May 2024 11:19:57 -0500 Subject: [PATCH 41/60] Replace old `join` call with `cascade` (#982) * #981: -> * Update changelog --- CHANGELOG.md | 3 ++- src/spyglass/utils/dj_mixin.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b4a2a5b2..54d6f087b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,11 +13,12 @@ - Add rollback option to `populate_all_common` #957, #971 - Add long-distance restrictions via `<<` and `>>` operators. #943, #969 - Fix relative pathing for `mkdocstring-python=>1.9.1`. #967, #968 +- Clean up old `TableChain.join` call in mixin delete. 
#982 ### Pipelines - DLC - - Allow dlc without pre-existing tracking data #973, #975 + - Allow dlc without pre-existing tracking data #973, #975 ## [0.5.2] (April 22, 2024) diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index be6063d04..35e54ea7a 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -476,7 +476,7 @@ def _get_exp_summary(self): format = dj.U(self._session_pk, self._member_pk) restr = self.restriction or True - sess_link = self._session_connection.join(restr, reverse_order=True) + sess_link = self._session_connection.cascade(restr, direction="up") exp_missing = format & (sess_link - SesExp).proj(**empty_pk) exp_present = format & (sess_link * SesExp - exp_missing).proj() From ae4a7b839071778dfb20a9abc87ce779d5bf597f Mon Sep 17 00:00:00 2001 From: Kyu Hyun Lee Date: Tue, 21 May 2024 14:02:30 -0700 Subject: [PATCH 42/60] Check existence of optional fields during `Electrode` table population (#985) * Separate optional fields * Provide default for filtering * Black * Use get --- src/spyglass/common/common_ephys.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/spyglass/common/common_ephys.py b/src/spyglass/common/common_ephys.py index 4cddc099d..722b32f74 100644 --- a/src/spyglass/common/common_ephys.py +++ b/src/spyglass/common/common_ephys.py @@ -101,6 +101,7 @@ def make(self, key): """Make without transaction Allows populate_all_common to work within a single transaction.""" + nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) @@ -132,10 +133,10 @@ def make(self, key): "region_id": BrainRegion.fetch_add( region_name=elect_data.group.location ), - "x": elect_data.x, - "y": elect_data.y, - "z": elect_data.z, - "filtering": elect_data.filtering, + "x": elect_data.get("x"), + "y": elect_data.get("y"), + "z": elect_data.get("z"), + "filtering": elect_data.get("filtering", "unfiltered"), "impedance": elect_data.get("imp"), **electrode_constants, } From bb4ce89a4b9ad23900a088398925bcc2a64806aa Mon Sep 17 00:00:00 2001 From: Chris Broz Date: Wed, 29 May 2024 12:13:14 -0500 Subject: [PATCH 43/60] Add pytests to position (#966) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * WIP: Add vid downloader, start position pytests * WIP: Full coverage on common_position.py * WIP: Start add tests for DLC * WIP: pytests for position 2 * WIP: Align yml files * WIP: Add DLC Centroid tests * WIP: tests for centoid, model, orietation * WIP: Subpackage coverage 72% * WIP: Run tests on main * WIP: Add prints to debug data download * WIP: remove redundant slash * WIP: download full directory * WIP: let pytests down dlc items * WIP: gh-action managed container * WIP: use mysql as service * WIP: fix container ID * WIP: revise no-docker arg name * WIP: Box vars available to test step * WIP: Only first test * WIP: remove duplicate installs from conda deps -> pyproject * WIP: Add cuda driver * WIP: Cuda method selective * WIP: unpin DLC * WIP: parameterize skipping dlc tests * WIP: Add back Checkout * WIP: All download in action * WIP: custom wget func * WIP: revise data downloader * WIP: Run all * WIP: no docker container id in test database_settings * WIP: revise utils tests for --no-docker * WIP: add password to DatabaseSettings no-docker run * WIP: Spellcheck * WIP: debug mysql add role command * WIP: fix typo * 🎉 : Update changelog * Edit PR template to remind of local tests * Revert 
conda-installed dependencies * Return mountainsort to conda pip list * Upgrade action versions per node.js 16 deprecation --- .github/pull_request_template.md | 1 + .github/workflows/publish-docs.yml | 1 + .github/workflows/test-conda.yml | 74 ++- .github/workflows/test-package-build.yml | 23 +- .gitignore | 1 + CHANGELOG.md | 7 + CODE_OF_CONDUCT.md | 47 +- docs/src/misc/mixin.md | 13 +- environment.yml | 12 +- environment_dlc.yml | 14 +- pyproject.toml | 52 +- src/spyglass/common/common_ephys.py | 2 +- src/spyglass/common/common_position.py | 28 +- .../decoding/v0/dj_decoder_conversion.py | 15 + src/spyglass/position/v1/dlc_reader.py | 36 +- src/spyglass/position/v1/dlc_utils.py | 29 +- .../position/v1/position_dlc_centroid.py | 11 +- .../position/v1/position_dlc_cohort.py | 6 + .../v1/position_dlc_pose_estimation.py | 16 +- .../position/v1/position_dlc_position.py | 15 +- .../position/v1/position_dlc_selection.py | 5 +- .../position/v1/position_dlc_training.py | 6 +- .../position/v1/position_trodes_position.py | 43 +- src/spyglass/utils/database_settings.py | 17 +- tests/README.md | 41 +- tests/common/test_behav.py | 20 +- tests/common/test_position.py | 94 ++- tests/conftest.py | 581 ++++++++++++++++-- tests/container.py | 14 +- tests/data_downloader.py | 139 +++++ tests/position/__init__.py | 0 tests/position/conftest.py | 92 +++ tests/position/test_dlc_cent.py | 63 ++ tests/position/test_dlc_model.py | 18 + tests/position/test_dlc_orient.py | 45 ++ tests/position/test_dlc_pos_est.py | 36 ++ tests/position/test_dlc_position.py | 64 ++ tests/position/test_dlc_proj.py | 68 ++ tests/position/test_dlc_sel.py | 17 + tests/position/test_dlc_train.py | 37 ++ tests/position/test_pos_merge.py | 24 + tests/position/test_trodes.py | 6 + tests/utils/test_db_settings.py | 6 +- tests/utils/test_mixin.py | 12 +- 44 files changed, 1599 insertions(+), 252 deletions(-) create mode 100644 tests/data_downloader.py create mode 100644 tests/position/__init__.py create mode 100644 tests/position/conftest.py create mode 100644 tests/position/test_dlc_cent.py create mode 100644 tests/position/test_dlc_model.py create mode 100644 tests/position/test_dlc_orient.py create mode 100644 tests/position/test_dlc_pos_est.py create mode 100644 tests/position/test_dlc_position.py create mode 100644 tests/position/test_dlc_proj.py create mode 100644 tests/position/test_dlc_sel.py create mode 100644 tests/position/test_dlc_train.py create mode 100644 tests/position/test_pos_merge.py diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 87c66399c..aebccd6de 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -33,5 +33,6 @@ Table.alter() # Comment regarding the change - [ ] If release, I have updated the `CITATION.cff` - [ ] This PR makes edits to table definitions: (yes/no) - [ ] If table edits, I have included an `alter` snippet for release notes. +- [ ] If this PR makes changes to positon, I ran the relevant tests locally. - [ ] I have updated the `CHANGELOG.md` with PR number and description. 
- [ ] I have added/edited docs/notebooks to reflect the changes diff --git a/.github/workflows/publish-docs.yml b/.github/workflows/publish-docs.yml index 3b39b877c..db1cf6224 100644 --- a/.github/workflows/publish-docs.yml +++ b/.github/workflows/publish-docs.yml @@ -5,6 +5,7 @@ on: - "*.*.*" # For docs bump, use X.X.XaX branches: - test_branch + workflow_dispatch: # Manually trigger with 'Run workflow' button permissions: contents: write diff --git a/.github/workflows/test-conda.yml b/.github/workflows/test-conda.yml index 6432b366e..fd9245c8e 100644 --- a/.github/workflows/test-conda.yml +++ b/.github/workflows/test-conda.yml @@ -1,4 +1,4 @@ -name: Test conda env and run tests +name: Tests on: push: @@ -7,52 +7,74 @@ on: - '!documentation' schedule: # once a day at midnight UTC - cron: '0 0 * * *' + workflow_dispatch: # Manually trigger with 'Run workflow' button + +concurrency: # Replace Cancel Workflow Action + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true jobs: run-tests: - runs-on: ${{ matrix.os }} + runs-on: ubuntu-latest defaults: run: shell: bash -l {0} - strategy: - matrix: - os: [ubuntu-latest] #, macos-latest, windows-latest] env: - OS: ${{ matrix.os }} - PYTHON: '3.8' + OS: ubuntu-latest + PYTHON: '3.9' + UCSF_BOX_TOKEN: ${{ secrets.UCSF_BOX_TOKEN }} # for download and testing + UCSF_BOX_USER: ${{ secrets.UCSF_BOX_USER }} + services: + mysql: + image: datajoint/mysql:8.0 + env: # args: mysql -h 127.0.0.1 -P 3308 -uroot -ptutorial -e "CMD;" + MYSQL_DATABASE: localhost + MYSQL_ROOT_PASSWORD: tutorial + ports: + - 3308:3306 + options: --health-cmd="mysqladmin ping" --health-interval=10s --health-timeout=5s --health-retries=3 steps: - - name: Cancel Workflow Action - uses: styfle/cancel-workflow-action@0.11.0 - with: - access_token: ${{ github.token }} - all_but_latest: true - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ env.PYTHON }} - - name: Set up conda environment - uses: conda-incubator/setup-miniconda@v2 + - name: Set up conda + uses: conda-incubator/setup-miniconda@v3 with: activate-environment: spyglass environment-file: environment.yml miniforge-variant: Mambaforge miniforge-version: latest - - name: Install spyglass + use-mamba: true + - name: Install apt dependencies run: | - pip install -e .[test] + sudo apt-get update # First mysql options + sudo apt-get install mysql-client libmysqlclient-dev libgirepository1.0-dev -y + sudo apt-get install ffmpeg libsm6 libxext6 -y # non-dlc position deps + - name: Run pip install for test deps + run: | + pip install --quiet .[test] - name: Download data env: - UCSF_BOX_TOKEN: ${{ secrets.UCSF_BOX_TOKEN }} - UCSF_BOX_USER: ${{ secrets.UCSF_BOX_USER }} - WEBSITE: ftps://ftp.box.com/trodes_to_nwb_test_data/minirec20230622.nwb + BASEURL: ftps://ftp.box.com/trodes_to_nwb_test_data/ + NWBFILE: minirec20230622.nwb # Relative to Base URL + VID_ONE: 20230622_sample_01_a1/20230622_sample_01_a1.1.h264 + VID_TWO: 20230622_sample_02_a1/20230622_sample_02_a1.1.h264 RAW_DIR: /home/runner/work/spyglass/spyglass/tests/_data/raw/ + VID_DIR: /home/runner/work/spyglass/spyglass/tests/_data/video/ run: | - mkdir -p $RAW_DIR - wget --recursive --no-verbose --no-host-directories --no-directories \ - --user $UCSF_BOX_USER --password $UCSF_BOX_TOKEN \ - -P $RAW_DIR $WEBSITE + mkdir -p $RAW_DIR $VID_DIR + wget_opts() { # Declare func with download options + wget \ + --recursive --no-verbose 
--no-host-directories --no-directories \ + --user "$UCSF_BOX_USER" --password "$UCSF_BOX_TOKEN" \ + -P "$1" "$BASEURL""$2" + } + wget_opts $RAW_DIR $NWBFILE + wget_opts $VID_DIR $VID_ONE + wget_opts $VID_DIR $VID_TWO - name: Run tests run: | - pytest -rP # env vars are set within certain tests + pytest --no-docker --no-dlc diff --git a/.github/workflows/test-package-build.yml b/.github/workflows/test-package-build.yml index 0098982cb..41aace719 100644 --- a/.github/workflows/test-package-build.yml +++ b/.github/workflows/test-package-build.yml @@ -13,6 +13,7 @@ on: branches: - master - maint/* + workflow_dispatch: # Manually trigger with 'Run workflow' button defaults: run: shell: bash @@ -20,10 +21,10 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3.9 - run: pip install --upgrade build twine @@ -31,14 +32,14 @@ jobs: run: python -m build - run: twine check dist/* - name: Upload sdist and wheel artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: dist path: dist/ - name: Build git archive run: mkdir archive && git archive -v -o archive/archive.tgz HEAD - name: Upload git archive artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: archive path: archive/ @@ -51,13 +52,13 @@ jobs: steps: - name: Download sdist and wheel artifacts if: matrix.package != 'archive' - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: dist path: dist/ - name: Download git archive artifact if: matrix.package == 'archive' - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: archive path: archive/ @@ -74,13 +75,9 @@ jobs: - name: Install sdist if: matrix.package == 'sdist' run: pip install dist/*.tar.gz - - name: Install archive - if: matrix.package == 'archive' + - name: Install archive # requires tag + if: matrix.package == 'archive' && startsWith(github.ref, 'refs/tags/') run: pip install archive/archive.tgz - # - name: Install test extras - # run: pip install spyglass[test] - # - name: Run tests - # run: pytest --doctest-modules -v --pyargs spyglass publish: name: Upload release to PyPI runs-on: ubuntu-latest @@ -92,7 +89,7 @@ jobs: id-token: write # IMPORTANT: this permission is mandatory for trusted publishing if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') steps: - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: name: dist path: dist/ diff --git a/.gitignore b/.gitignore index 052080023..6319e5f1c 100644 --- a/.gitignore +++ b/.gitignore @@ -59,6 +59,7 @@ coverage.xml *.cover .hypothesis/ .pytest_cache/ +tests/_data/* # Translations *.mo diff --git a/CHANGELOG.md b/CHANGELOG.md index 54d6f087b..565b0c301 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,11 +14,18 @@ - Add long-distance restrictions via `<<` and `>>` operators. #943, #969 - Fix relative pathing for `mkdocstring-python=>1.9.1`. #967, #968 - Clean up old `TableChain.join` call in mixin delete. 
#982 +- Add pytests for position pipeline, various `test_mode` exceptions #966 +- Migrate `pip` dependencies from `environment.yml`s to `pyproject.toml` #966 ### Pipelines +- Common + - `PositionVideo` table now inserts into self after `make` #966 +- Decoding: Default values for classes on `ImportError` #966 - DLC - Allow dlc without pre-existing tracking data #973, #975 + - Raise `KeyError` for missing input parameters across helper funcs #966 + - `DLCPosVideo` table now inserts into self after `make` #966 ## [0.5.2] (April 22, 2024) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index d146e54a8..df319d828 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,4 +1,3 @@ - # Contributor Covenant Code of Conduct ## Our Pledge @@ -6,7 +5,7 @@ We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, +identity and expression, level of experience, education, socioeconomic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. @@ -18,24 +17,24 @@ diverse, inclusive, and healthy community. Examples of behavior that contributes to a positive environment for our community include: -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the overall - community +- Demonstrating empathy and kindness toward other people +- Being respectful of differing opinions, viewpoints, and experiences +- Giving and gracefully accepting constructive feedback +- Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +- Focusing on what is best not just for us as individuals, but for the overall + community Examples of unacceptable behavior include: -* The use of sexualized language or imagery, and sexual attention or advances of - any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email address, - without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting +- The use of sexualized language or imagery, and sexual attention or advances of + any kind +- Trolling, insulting or derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or email address, + without their explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting ## Enforcement Responsibilities @@ -61,8 +60,8 @@ representative at an online or offline event. Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at -eric.denovellis@ucsf.edu. -All complaints will be reviewed and investigated promptly and fairly. +eric.denovellis@ucsf.edu. All complaints will be reviewed and investigated +promptly and fairly. 
All community leaders are obligated to respect the privacy and security of the reporter of any incident. @@ -120,14 +119,14 @@ version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. Community Impact Guidelines were inspired by -[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. +[Mozilla's code of conduct enforcement ladder][mozilla coc]. For answers to common questions about this code of conduct, see the FAQ at -[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/faq][faq]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. +[faq]: https://www.contributor-covenant.org/faq [homepage]: https://www.contributor-covenant.org -[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html -[Mozilla CoC]: https://github.com/mozilla/diversity -[FAQ]: https://www.contributor-covenant.org/faq +[mozilla coc]: https://github.com/mozilla/diversity [translations]: https://www.contributor-covenant.org/translations +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html diff --git a/docs/src/misc/mixin.md b/docs/src/misc/mixin.md index 229747402..ab49b0c49 100644 --- a/docs/src/misc/mixin.md +++ b/docs/src/misc/mixin.md @@ -21,6 +21,11 @@ schema = dj.schema("my_schema") @schema class MyOldTable(dj.Manual): pass + + +@schema +class MyNewTable(SpyglassMixin, dj.Manual): + pass ``` **NOTE**: The mixin must be the first class inherited from in order to override @@ -60,10 +65,10 @@ key and `>>` as a shorthand for `restrict_by` a downstream key. from spyglass.example import AnyTable AnyTable() << 'upstream_attribute="value"' -AnyTable() >> 'downsteam_attribute="value"' +AnyTable() >> 'downstream_attribute="value"' # Equivalent to -AnyTable().restrict_by('downsteam_attribute="value"', direction="down") +AnyTable().restrict_by('downstream_attribute="value"', direction="down") AnyTable().restrict_by('upstream_attribute="value"', direction="up") ``` @@ -136,7 +141,7 @@ function, `delete_downstream_merge`, to handle this, which is run by default when calling `delete`. `delete_downstream_merge`, also aliased as `ddm`, identifies all Merge tables -downsteam of where it is called. If `dry_run=True`, it will return a list of +downstream of where it is called. If `dry_run=True`, it will return a list of entries that would be deleted, otherwise it will delete them. Importantly, `delete_downstream_merge` cannot properly interact with tables that @@ -156,7 +161,7 @@ from spyglass.example import MyMerge restricted_nwbfile.delete_downstream_merge(reload_cache=True, dry_run=False) ``` -Because each table keeps a cache of downsteam merge tables, it is important to +Because each table keeps a cache of downstream merge tables, it is important to reload the cache if the table has been imported after the cache was created. Speed gains can also be achieved by avoiding re-instancing the table each time. 
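A minimal sketch of how the mixin helpers documented above compose, using the docs' placeholder names (`AnyTable` and `spyglass.example` are illustrative stand-ins from `docs/src/misc/mixin.md`, not real Spyglass tables):

```python
# Sketch only: `AnyTable` / `spyglass.example` are the placeholder names used
# in docs/src/misc/mixin.md, not actual Spyglass tables.
from spyglass.example import AnyTable

# Long-distance restrictions: `<<` restricts by an upstream key, `>>` by a
# downstream key; both are shorthand for `restrict_by` with a direction.
upstream_hits = AnyTable() << 'upstream_attribute="value"'
downstream_hits = AnyTable() >> 'downstream_attribute="value"'
also_downstream = AnyTable().restrict_by(
    'downstream_attribute="value"', direction="down"
)

# Downstream-merge cleanup: with dry_run=True, delete_downstream_merge (alias
# `ddm`) only reports the merge-table entries that would be removed.
# reload_cache=True rebuilds the per-table cache of downstream Merge tables,
# which is needed if a Merge table was imported after this instance was made.
restricted_table = AnyTable() & 'nwb_file_name="example.nwb"'
would_delete = restricted_table.delete_downstream_merge(
    reload_cache=True, dry_run=True
)
print(would_delete)
```

Keeping one table instance around, rather than re-instancing it for every call, avoids rebuilding that cache repeatedly, which is where the speed gain mentioned above comes from.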
diff --git a/environment.yml b/environment.yml index 0f5e19187..7fa1b51ea 100644 --- a/environment.yml +++ b/environment.yml @@ -10,9 +10,10 @@ name: spyglass channels: - conda-forge - defaults - # - pytorch # dlc-only - franklab - edeno + # - pytorch # dlc-only + # - anaconda # dlc-only, for cudatoolkit dependencies: - bottleneck # - cudatoolkit=11.3 # dlc-only @@ -36,15 +37,6 @@ dependencies: # - torchvision # dlc-only - track_linearization>=2.3 - pip: - - "black[jupyter]" - - datajoint>=0.13.6 - # - deeplabcut<2.3.0 # dlc-only - ghostipy # for common_filter - - ndx-franklab-novela>=0.1.0 - mountainsort4 - - panel<=1.3.5 # See panel #6325 - - pubnub<=6.4.0 - - pynwb>=2.2.0,<3 - - sortingview>=0.11 - - spikeinterface>=0.99.1,<0.100 - . diff --git a/environment_dlc.yml b/environment_dlc.yml index 45fd107c8..9870a0424 100644 --- a/environment_dlc.yml +++ b/environment_dlc.yml @@ -10,9 +10,10 @@ name: spyglass-dlc channels: - conda-forge - defaults - - pytorch # dlc-only - franklab - edeno + - pytorch # dlc-only + - anaconda # dlc-only, for cudatoolkit dependencies: - bottleneck - cudatoolkit=11.3 # dlc-only @@ -22,6 +23,7 @@ dependencies: - libgcc # dlc-only - matplotlib - non_local_detector + - numpy<1.24 - pip>=20.2.* - position_tools - pybind11 # req by mountainsort4 -> isosplit5 @@ -35,16 +37,6 @@ dependencies: - torchvision # dlc-only - track_linearization>=2.3 - pip: - - "black[jupyter]" - - datajoint>=0.13.6 - - deeplabcut<2.3.0 # dlc-only - ghostipy # for common_filter - - ndx-franklab-novela>=0.1.0 - mountainsort4 - - panel<=1.3.5 # See panel #6325 - - pubnub<=6.4.0 - - pynwb>=2.2.0,<3 - - sortingview>=0.11 - - spikeinterface>=0.98.2,<0.99 - - tensorflow<=2.12 # dlc-only - .[dlc] diff --git a/pyproject.toml b/pyproject.toml index ffb8d0df6..78d189b73 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,6 +34,7 @@ keywords = [ "kachery", "sortingview", ] +dynamic = ["version"] dependencies = [ "black[jupyter]", "bottleneck", @@ -47,8 +48,8 @@ dependencies = [ "non_local_detector", "numpy<1.24", "opencv-python", - "panel<=1.3.4", # See panel #6325 - "position_tools", + "panel>=1.4.0", # panel #6325 resolved + "position_tools>=0.1.0", "pubnub<6.4.0", # TODO: remove this when sortingview is updated "pydotplus", "pynwb>=2.2.0,<3", @@ -58,31 +59,27 @@ dependencies = [ "spikeinterface>=0.99.1,<0.100", "track_linearization>=2.3", ] -dynamic = ["version"] - -[project.scripts] -spyglass_cli = "spyglass.cli:cli" - -[project.urls] -"Homepage" = "https://github.com/LorenFrankLab/spyglass" -"Bug Tracker" = "https://github.com/LorenFrankLab/spyglass/issues" [project.optional-dependencies] -dlc = ["ffmpeg", "numba>=0.54", "deeplabcut<2.3.0"] +dlc = [ + "ffmpeg", + "deeplabcut[tf]", # removing dlc pin removes need to pin tf/numba +] test = [ - "click", # for CLI subpackage only - "docker", # for tests in a container + "click", # for CLI subpackage only + "docker", # for tests in a container "ghostipy", - "kachery", # database access + "kachery", # database access "kachery-client", "kachery-cloud>=0.4.0", - "pre-commit", # linting - "pytest", # unit testing - "pytest-cov", # code coverage + "pre-commit", # linting + "pytest", # unit testing + "pytest-cov", # code coverage + "pytest-xvfb", # for headless testing of Qt ] docs = [ "hatch", # Get version from env - "jupytext==1.16.0", # Convert notebooks to .py + "jupytext", # Convert notebooks to .py "mike", # Docs versioning "mkdocs", # Docs core "mkdocs-exclude", # Docs exclude files @@ -94,6 +91,13 @@ docs = [ 
"mkdocstrings[python]", # Docs API docstrings ] +[project.scripts] +spyglass_cli = "spyglass.cli:cli" + +[project.urls] +"Homepage" = "https://github.com/LorenFrankLab/spyglass" +"Bug Tracker" = "https://github.com/LorenFrankLab/spyglass/issues" + [tool.hatch.version] source = "vcs" @@ -120,20 +124,28 @@ ignore-words-list = 'nevers' [tool.pytest.ini_options] minversion = "7.0" addopts = [ - "-sv", + # "-sv", # no capture, verbose output # "--sw", # stepwise: resume with next test after failure # "--pdb", # drop into debugger on failure "-p no:warnings", # "--no-teardown", # don't teardown the database after tests - # "--quiet-spy", # don't show logging from spyglass + # "--quiet-spy", # don't show logging from spyglass + # "--no-dlc", # don't run DLC tests "--show-capture=no", "--pdbcls=IPython.terminal.debugger:TerminalPdb", # use ipython debugger + "--doctest-modules", # run doctests in all modules "--cov=spyglass", "--cov-report=term-missing", "--no-cov-on-fail", ] testpaths = ["tests"] log_level = "INFO" +env = [ + "QT_QPA_PLATFORM = offscreen", # QT fails headless without this + "DISPLAY = :0", # QT fails headless without this + "TF_ENABLE_ONEDNN_OPTS = 0", # TF disable approx calcs + "TF_CPP_MIN_LOG_LEVEL = 2", # Disable TF warnings +] [tool.coverage.run] source = ["*/src/spyglass/*"] diff --git a/src/spyglass/common/common_ephys.py b/src/spyglass/common/common_ephys.py index 722b32f74..f9abff647 100644 --- a/src/spyglass/common/common_ephys.py +++ b/src/spyglass/common/common_ephys.py @@ -625,7 +625,7 @@ def set_lfp_band_electrodes( if lfp_sampling_rate // decimation != lfp_band_sampling_rate: raise ValueError( f"lfp_band_sampling rate {lfp_band_sampling_rate} is not an integer divisor of lfp " - f"samping rate {lfp_sampling_rate}" + f"sampling rate {lfp_sampling_rate}" ) # filter query = FirFilterParameters() & { diff --git a/src/spyglass/common/common_position.py b/src/spyglass/common/common_position.py index 382e39069..ed91aa463 100644 --- a/src/spyglass/common/common_position.py +++ b/src/spyglass/common/common_position.py @@ -24,7 +24,7 @@ from spyglass.common.common_behav import RawPosition, VideoFile from spyglass.common.common_interval import IntervalList # noqa F401 from spyglass.common.common_nwbfile import AnalysisNwbfile -from spyglass.settings import raw_dir, video_dir +from spyglass.settings import raw_dir, test_mode, video_dir from spyglass.utils import SpyglassMixin, logger from spyglass.utils.dj_helper_fn import deprecated_factory @@ -601,6 +601,7 @@ def make(self, key): cm_to_pixels=cm_per_pixel, disable_progressbar=False, ) + self.insert1(key) @staticmethod def convert_to_pixels(data, frame_size, cm_to_pixels=1.0): @@ -644,6 +645,7 @@ def make_video( disable_progressbar=False, arrow_radius=15, circle_radius=8, + truncate_data=False, # reduce data to min length across all variables ): import cv2 # noqa: F401 @@ -657,6 +659,25 @@ def make_video( frame_rate = video.get(5) n_frames = int(head_orientation_mean.shape[0]) + if test_mode or truncate_data: + # pytest video data has mismatched shapes in some cases + # centroid (267, 2), video_time (270, 2), position_time (5193,) + min_len = min( + n_frames, + len(video_time), + len(position_time), + len(head_position_mean), + len(head_orientation_mean), + min(len(v) for v in centroids.values()), + ) + n_frames = min_len + video_time = video_time[:min_len] + position_time = position_time[:min_len] + head_position_mean = head_position_mean[:min_len] + head_orientation_mean = head_orientation_mean[:min_len] + 
for color, data in centroids.items(): + centroids[color] = data[:min_len] + out = cv2.VideoWriter( output_video_filename, fourcc, frame_rate, frame_size, True ) @@ -749,7 +770,10 @@ def make_video( video.release() out.release() - cv2.destroyAllWindows() + try: + cv2.destroyAllWindows() + except cv2.error: # if cv is already closed or does not have func + pass # ----------------------------- Migrated Tables ----------------------------- diff --git a/src/spyglass/decoding/v0/dj_decoder_conversion.py b/src/spyglass/decoding/v0/dj_decoder_conversion.py index edcb0d637..1cf6d30c4 100644 --- a/src/spyglass/decoding/v0/dj_decoder_conversion.py +++ b/src/spyglass/decoding/v0/dj_decoder_conversion.py @@ -26,6 +26,21 @@ ObservationModel, ) except ImportError as e: + ( + Identity, + RandomWalk, + RandomWalkDirection1, + RandomWalkDirection2, + Uniform, + DiagonalDiscrete, + RandomDiscrete, + UniformDiscrete, + UserDefinedDiscrete, + Environment, + UniformInitialConditions, + UniformOneEnvironmentInitialConditions, + ObservationModel, + ) = [None] * 13 logger.warning(e) from track_linearization import make_track_graph diff --git a/src/spyglass/position/v1/dlc_reader.py b/src/spyglass/position/v1/dlc_reader.py index c2e56063f..caa3c2e5c 100644 --- a/src/spyglass/position/v1/dlc_reader.py +++ b/src/spyglass/position/v1/dlc_reader.py @@ -8,6 +8,8 @@ import pandas as pd import ruamel.yaml as yaml +from spyglass.settings import test_mode + class PoseEstimation: def __init__( @@ -32,10 +34,11 @@ def __init__( pkl_paths = list( self.dlc_dir.rglob(f"{filename_prefix}*meta.pickle") ) - assert len(pkl_paths) == 1, ( - "Unable to find one unique .pickle file in: " - + f"{dlc_dir} - Found: {len(pkl_paths)}" - ) + if not test_mode: + assert len(pkl_paths) == 1, ( + "Unable to find one unique .pickle file in: " + + f"{dlc_dir} - Found: {len(pkl_paths)}" + ) self.pkl_path = pkl_paths[0] else: self.pkl_path = Path(pkl_path) @@ -44,18 +47,20 @@ def __init__( # data file: h5 - body part outputs from the DLC post estimation step if h5_path is None: h5_paths = list(self.dlc_dir.rglob(f"{filename_prefix}*.h5")) - assert len(h5_paths) == 1, ( - "Unable to find one unique .h5 file in: " - + f"{dlc_dir} - Found: {len(h5_paths)}" - ) + if not test_mode: + assert len(h5_paths) == 1, ( + "Unable to find one unique .h5 file in: " + + f"{dlc_dir} - Found: {len(h5_paths)}" + ) self.h5_path = h5_paths[0] else: self.h5_path = Path(h5_path) assert self.h5_path.exists() - assert ( - self.pkl_path.stem == self.h5_path.stem + "_meta" - ), f"Mismatching h5 ({self.h5_path.stem}) and pickle {self.pkl_path.stem}" + if not test_mode: + assert ( + self.pkl_path.stem == self.h5_path.stem + "_meta" + ), f"Mismatching h5 ({self.h5_path.stem}) and pickle {self.pkl_path.stem}" # config file: yaml - configuration for invoking the DLC post estimation step if yml_path is None: @@ -65,10 +70,11 @@ def __init__( yml_paths = [ val for val in yml_paths if val.stem == "dj_dlc_config" ] - assert len(yml_paths) == 1, ( - "Unable to find one unique .yaml file in: " - + f"{dlc_dir} - Found: {len(yml_paths)}" - ) + if not test_mode: + assert len(yml_paths) == 1, ( + "Unable to find one unique .yaml file in: " + + f"{dlc_dir} - Found: {len(yml_paths)}" + ) self.yml_path = yml_paths[0] else: self.yml_path = Path(yml_path) diff --git a/src/spyglass/position/v1/dlc_utils.py b/src/spyglass/position/v1/dlc_utils.py index 369207886..6d27615e4 100644 --- a/src/spyglass/position/v1/dlc_utils.py +++ b/src/spyglass/position/v1/dlc_utils.py @@ -11,7 +11,7 @@ from 
contextlib import redirect_stdout from itertools import groupby from operator import itemgetter -from typing import Union +from typing import Iterable, Union import datajoint as dj import matplotlib.pyplot as plt @@ -20,8 +20,8 @@ from tqdm import tqdm as tqdm from spyglass.common.common_behav import VideoFile +from spyglass.settings import dlc_output_dir, dlc_video_dir, raw_dir, test_mode from spyglass.utils import logger -from spyglass.settings import dlc_output_dir, dlc_video_dir, raw_dir def validate_option( @@ -62,7 +62,10 @@ def validate_option( f"Unknown {name}: {option} " f"Available options: {options}" ) - if types and not isinstance(option, tuple(types)): + if types is not None and not isinstance(types, Iterable): + types = (types,) + + if types is not None and not isinstance(option, types): raise TypeError(f"{name} is {type(option)}. Available types {types}") if val_range and not (val_range[0] <= option <= val_range[1]): @@ -108,7 +111,6 @@ def validate_smooth_params(params): if not params.get("smooth"): return smoothing_params = params.get("smoothing_params") - validate_option(smoother=smoothing_params, name="smoothing_params") validate_option( option=smoothing_params.get("smooth_method"), name="smooth_method", @@ -194,7 +196,7 @@ class OutputLogger: # TODO: migrate to spyglass.utils.logger def __init__(self, name, path, level="INFO", **kwargs): self.logger = self.setup_logger(name, path, **kwargs) self.name = self.logger.name - self.level = getattr(logging, level) + self.level = 30 if test_mode else getattr(logging, level) def setup_logger( self, name_logfile, path_logfile, print_console=False @@ -383,7 +385,17 @@ def infer_output_dir(key, makedir=True): """ # TODO: add check to make sure interval_list_name refers to a single epoch # Or make key include epoch in and of itself instead of interval_list_name - nwb_file_name = key["nwb_file_name"].split("_.")[0] + + file_name = key.get("nwb_file_name") + dlc_model_name = key.get("dlc_model_name") + epoch = key.get("epoch") + + if not all([file_name, dlc_model_name, epoch]): + raise ValueError( + "Key must contain 'nwb_file_name', 'dlc_model_name', and 'epoch'" + ) + + nwb_file_name = file_name.split("_.")[0] output_dir = pathlib.Path(dlc_output_dir) / pathlib.Path( f"{nwb_file_name}/{nwb_file_name}_{key['epoch']:02}" f"_model_" + key["dlc_model_name"].replace(" ", "-") @@ -1019,7 +1031,10 @@ def make_video( video.release() out.release() print("destroying cv2 windows") - cv2.destroyAllWindows() + try: + cv2.destroyAllWindows() + except cv2.error: # if cv is already closed or does not have func + pass print("finished making video with opencv") return diff --git a/src/spyglass/position/v1/position_dlc_centroid.py b/src/spyglass/position/v1/position_dlc_centroid.py index f1f077d6a..70a1c1252 100644 --- a/src/spyglass/position/v1/position_dlc_centroid.py +++ b/src/spyglass/position/v1/position_dlc_centroid.py @@ -170,7 +170,7 @@ def make(self, key): for point in required_points: bodypart = points[point] if bodypart not in bodyparts_avail: - raise ValueError( + raise ValueError( # TODO: migrate to input validation "Bodypart in points not in model." 
f"\tBodypart {bodypart}" f"\tIn Model {bodyparts_avail}" @@ -222,6 +222,7 @@ def make(self, key): "smoothing_duration" ) if not smoothing_duration: + # TODO: remove - validated with `validate_smooth_params` raise KeyError( "smoothing_duration needs to be passed within smoothing_params" ) @@ -368,6 +369,7 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): """Determines the centroid of 4 LEDS on an implant LED ring. Assumed to be the Green LED, and 3 red LEDs called: redLED_C, redLED_L, redLED_R By default, uses (greenled + redLED_C) / 2 to calculate centroid + If Green LED is NaN, but red center LED is not, then the red center LED is called the centroid If green and red center LEDs are NaN, but red left and red right LEDs are not, @@ -397,6 +399,9 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): numpy array with shape (n_time, 2) centroid[0] is the x coord and centroid[1] is the y coord """ + if not (params.get("max_LED_separation") and params.get("points")): + raise KeyError("max_LED_separation/points need to be passed in params") + centroid = np.zeros(shape=(len(pos_df), 2)) idx = pd.IndexSlice # TODO: this feels messy, clean-up @@ -722,6 +727,8 @@ def two_pt_centroid(pos_df: pd.DataFrame, **params): numpy array with shape (n_time, 2) centroid[0] is the x coord and centroid[1] is the y coord """ + if not (params.get("max_LED_separation") and params.get("points")): + raise KeyError("max_LED_separation/points need to be passed in params") idx = pd.IndexSlice centroid = np.zeros(shape=(len(pos_df), 2)) @@ -797,6 +804,8 @@ def one_pt_centroid(pos_df: pd.DataFrame, **params): numpy array with shape (n_time, 2) centroid[0] is the x coord and centroid[1] is the y coord """ + if not params.get("points"): + raise KeyError("points need to be passed in params") idx = pd.IndexSlice PT1 = params["points"].pop("point1", None) centroid = pos_df.loc[:, idx[PT1, ("x", "y")]].to_numpy() diff --git a/src/spyglass/position/v1/position_dlc_cohort.py b/src/spyglass/position/v1/position_dlc_cohort.py index b265a1ce5..6cf1f0eee 100644 --- a/src/spyglass/position/v1/position_dlc_cohort.py +++ b/src/spyglass/position/v1/position_dlc_cohort.py @@ -113,6 +113,12 @@ def make(self, key): bodyparts_params_dict ), "more entries found in DLCSmoothInterp than specified in bodyparts_params_dict" table_column_names = list(table_entries[0].dtype.fields.keys()) + + if len(table_entries) == 0: + raise ValueError( + f"No entries found in DLCSmoothInterp for {temp_key}" + ) + for table_entry in table_entries: entry_key = { **{ diff --git a/src/spyglass/position/v1/position_dlc_pose_estimation.py b/src/spyglass/position/v1/position_dlc_pose_estimation.py index 6a670fc31..6ae7669bf 100644 --- a/src/spyglass/position/v1/position_dlc_pose_estimation.py +++ b/src/spyglass/position/v1/position_dlc_pose_estimation.py @@ -35,7 +35,7 @@ class DLCPoseEstimationSelection(SpyglassMixin, dj.Manual): """ @classmethod - def get_video_crop(cls, video_path): + def get_video_crop(cls, video_path, crop_input=None): """ Queries the user to determine the cropping parameters for a given video @@ -61,9 +61,13 @@ def get_video_crop(cls, video_path): ax.set_yticks(np.arange(ylims[0], ylims[-1], -50)) ax.grid(visible=True, color="white", lw=0.5, alpha=0.5) display(fig) - crop_input = input( - "Please enter the crop parameters for your video in format xmin, xmax, ymin, ymax, or 'none'\n" - ) + + if crop_input is None: + crop_input = input( + "Please enter the crop parameters for your video in format " + + "xmin, xmax, ymin, ymax, or 
'none'\n" + ) + plt.close() if crop_input.lower() == "none": return None @@ -98,6 +102,10 @@ def insert_estimation_task( video_path, video_filename, _, _ = get_video_path(key) output_dir = infer_output_dir(key) + + if not video_path: + raise FileNotFoundError(f"Video file not found for {key}") + with OutputLogger( name=f"{key['nwb_file_name']}_{key['epoch']}_{key['dlc_model_name']}_log", path=f"{output_dir.as_posix()}/log.log", diff --git a/src/spyglass/position/v1/position_dlc_position.py b/src/spyglass/position/v1/position_dlc_position.py index 11c7019f3..c18eafd62 100644 --- a/src/spyglass/position/v1/position_dlc_position.py +++ b/src/spyglass/position/v1/position_dlc_position.py @@ -13,6 +13,7 @@ validate_option, validate_smooth_params, ) +from spyglass.settings import test_mode from spyglass.utils.dj_mixin import SpyglassMixin from .position_dlc_pose_estimation import DLCPoseEstimation @@ -176,7 +177,12 @@ def make(self, key): params = (DLCSmoothInterpParams() & key).fetch1("params") # Get DLC output dataframe logger.logger.info("fetching Pose Estimation Dataframe") - dlc_df = (DLCPoseEstimation.BodyPart() & key).fetch1_dataframe() + + bp_key = key.copy() + if test_mode: # during testing, analysis_file not in BodyPart table + bp_key.pop("analysis_file_name", None) + + dlc_df = (DLCPoseEstimation.BodyPart() & bp_key).fetch1_dataframe() dt = np.median(np.diff(dlc_df.index.to_numpy())) sampling_rate = 1 / dt logger.logger.info("Identifying indices to NaN") @@ -223,7 +229,7 @@ def make(self, key): final_df = smooth_df.drop(["likelihood"], axis=1) final_df = final_df.rename_axis("time").reset_index() position_nwb_data = ( - (DLCPoseEstimation.BodyPart() & key) + (DLCPoseEstimation.BodyPart() & bp_key) .fetch_nwb()[0]["dlc_pose_estimation_position"] .get_spatial_series() ) @@ -338,6 +344,11 @@ def nan_inds( subthresh_inds_mask, inds_to_span=inds_to_span ) + if len(good_spans) == 0: + # Prevents ref before assignment error of mask on return + # TODO: instead of raise, insert empty dataframe + raise ValueError("No good spans found in the data") + for span in good_spans[::-1]: if np.sum(np.isnan(dlc_df.iloc[span[0] : span[-1]].x)) > 0: nan_mask = np.isnan(dlc_df.iloc[span[0] : span[-1]].x) diff --git a/src/spyglass/position/v1/position_dlc_selection.py b/src/spyglass/position/v1/position_dlc_selection.py index b140111e1..02692ce14 100644 --- a/src/spyglass/position/v1/position_dlc_selection.py +++ b/src/spyglass/position/v1/position_dlc_selection.py @@ -276,7 +276,7 @@ def insert_default(cls): def get_default(cls): query = cls & {"dlc_pos_video_params_name": "default"} if not len(query) > 0: - cls().insert_default(skip_duplicates=True) + cls().insert_default() default = (cls & {"dlc_pos_video_params_name": "default"}).fetch1() else: default = query.fetch1() @@ -304,6 +304,8 @@ class DLCPosVideo(SpyglassMixin, dj.Computed): --- """ + # TODO: Shoultn't this keep track of the video file it creates? 
+ def make(self, key): from tqdm import tqdm as tqdm @@ -432,3 +434,4 @@ def make(self, key): crop=crop, **params["video_params"], ) + self.insert1(key) diff --git a/src/spyglass/position/v1/position_dlc_training.py b/src/spyglass/position/v1/position_dlc_training.py index ec40d43e0..393eb6af9 100644 --- a/src/spyglass/position/v1/position_dlc_training.py +++ b/src/spyglass/position/v1/position_dlc_training.py @@ -6,6 +6,7 @@ from spyglass.position.v1.dlc_utils import OutputLogger from spyglass.position.v1.position_dlc_project import DLCProject +from spyglass.settings import test_mode from spyglass.utils.dj_mixin import SpyglassMixin schema = dj.schema("position_v1_dlc_training") @@ -107,7 +108,7 @@ class DLCModelTrainingSelection(SpyglassMixin, dj.Manual): """ def insert1(self, key, **kwargs): - training_id = key["training_id"] + training_id = key.get("training_id") if training_id is None: training_id = ( dj.U().aggr(self & key, n="max(training_id)").fetch1("n") or 0 @@ -185,6 +186,7 @@ def make(self, key): if k in training_dataset_input_args } logger.logger.info("creating training dataset") + # err here create_training_dataset(dlc_cfg_filepath, **training_dataset_kwargs) # ---- Trigger DLC model training job ---- train_network_input_args = list( @@ -198,6 +200,8 @@ def make(self, key): for k in ["shuffle", "trainingsetindex", "maxiters"]: if k in train_network_kwargs: train_network_kwargs[k] = int(train_network_kwargs[k]) + if test_mode: + train_network_kwargs["maxiters"] = 2 try: train_network(dlc_cfg_filepath, **train_network_kwargs) except ( diff --git a/src/spyglass/position/v1/position_trodes_position.py b/src/spyglass/position/v1/position_trodes_position.py index 1a422b86f..86487ad23 100644 --- a/src/spyglass/position/v1/position_trodes_position.py +++ b/src/spyglass/position/v1/position_trodes_position.py @@ -11,6 +11,7 @@ from spyglass.common.common_nwbfile import AnalysisNwbfile from spyglass.common.common_position import IntervalPositionInfo from spyglass.position.v1.dlc_utils import check_videofile, get_video_path +from spyglass.settings import test_mode from spyglass.utils import SpyglassMixin, logger schema = dj.schema("position_v1_trodes_position") @@ -337,11 +338,23 @@ def convert_to_pixels(data, frame_size, cm_to_pixels=1.0): return data / cm_to_pixels @staticmethod - def fill_nan(variable, video_time, variable_time): + def fill_nan(variable, video_time, variable_time, truncate_data=False): + """Fill in missing values in variable with nans at video_time. + + Parameters + ---------- + variable : ndarray, shape (n_time,) or (n_time, n_dims) + The variable to fill in. + video_time : ndarray, shape (n_video_time,) + The time points of the video. + variable_time : ndarray, shape (n_variable_time,) + The time points of the variable. 
+ """ # TODO: Reduce duplication across dlc_utils and common_position - video_ind = np.digitize(variable_time, video_time[1:]) + video_ind = np.digitize(variable_time, video_time[1:]) n_video_time = len(video_time) + try: n_variable_dims = variable.shape[1] filled_variable = np.full((n_video_time, n_variable_dims), np.nan) @@ -365,6 +378,7 @@ def make_video( disable_progressbar=False, arrow_radius=15, circle_radius=8, + truncate_data=False, # reduce data to min length across all variables ): import cv2 @@ -382,8 +396,31 @@ def make_video( output_video_filename, fourcc, frame_rate, frame_size, True ) + if test_mode or truncate_data: + # pytest video data has mismatched shapes in some cases + # centroid (267, 2), video_time (270, 2), position_time (5193,) + min_len = min( + n_frames, + len(video_time), + len(position_time), + len(position_mean), + len(orientation_mean), + min(len(v) for v in centroids.values()), + ) + n_frames = min_len + video_time = video_time[:min_len] + position_time = position_time[:min_len] + position_mean = position_mean[:min_len] + orientation_mean = orientation_mean[:min_len] + for color, data in centroids.items(): + centroids[color] = data[:min_len] + centroids = { - color: self.fill_nan(data, video_time, position_time) + color: self.fill_nan( + variable=data, + video_time=video_time, + variable_time=position_time, + ) for color, data in centroids.items() } position_mean = self.fill_nan(position_mean, video_time, position_time) diff --git a/src/spyglass/utils/database_settings.py b/src/spyglass/utils/database_settings.py index 7e1834313..1ad6efaa4 100755 --- a/src/spyglass/utils/database_settings.py +++ b/src/spyglass/utils/database_settings.py @@ -1,7 +1,9 @@ #!/usr/bin/env python + import os import sys import tempfile +from functools import cached_property from pathlib import Path import datajoint as dj @@ -37,6 +39,7 @@ def __init__( target_database=None, exec_user=None, exec_pass=None, + test_mode=False, ): """Class to manage common database settings @@ -66,6 +69,9 @@ def __init__( User for executing commands. If None, use dj.config exec_pass : str, optional Password for executing commands. If None, use dj.config + test_mode : bool, optional + Default False. If True, prepend sudo to commands for use in CI/CD + Only true in github actions, not true in local testing. 
""" self.shared_modules = [f"{m}{ESC}" for m in SHARED_MODULES] self.user = user_name or dj.config["database.user"] @@ -76,6 +82,7 @@ def __init__( self.target_database = target_database or "mysql" self.exec_user = exec_user or dj.config["database.user"] self.exec_pass = exec_pass or dj.config["database.password"] + self.test_mode = test_mode @property def _create_roles_dict(self): @@ -102,7 +109,7 @@ def _create_roles_dict(self): ], ) - @property + @cached_property def _create_roles_sql(self): return sum(self._create_roles_dict.values(), []) @@ -214,10 +221,16 @@ def exec(self, file): if self.debug: return + if self.test_mode: + prefix = "sudo mysql -h 127.0.0.1 -P 3308 -uroot -ptutorial" + else: + prefix = f"mysql -h {self.host} -u {self.exec_user} -p" + cmd = ( - f"mysql -p -h {self.host} < {file.name}" + f"{prefix} < {file.name}" if self.target_database == "mysql" else f"docker exec -i {self.target_database} mysql -u " + f"{self.exec_user} --password={self.exec_pass} < {file.name}" ) + os.system(cmd) diff --git a/tests/README.md b/tests/README.md index 476dbb4c8..36b6ab71f 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,5 +1,25 @@ # PyTests +## Environment + +To allow pytest helpers to automatically dowlnoad requisite data, you'll need to +set credentials for Box. Consider adding these to a private `.env` file. + +- `UCSF_BOX_USER`: UCSF email address +- `UCSF_BOX_TOKEN`: Token generated from UCSF Box account + +To facilitate headless testing of various Qt-based tools as well as Tensorflow, +`pyproject.toml` includes some environment variables associated with the +display. These are... + +- `QT_QPA_PLATFORM`: Set to `offscreen` to prevent the need for a display. +- `TF_ENABLE_ONEDNN_OPTS`: Set to `1` to enable Tensorflow optimizations. +- `TF_CPP_MIN_LOG_LEVEL`: Set to `2` to suppress Tensorflow warnings. + + + +## Options + This directory is contains files for testing the code. Simply by running `pytest` from the root directory, all tests will be run with default parameters specified in `pyproject.toml`. Notable optional parameters include... @@ -7,7 +27,7 @@ specified in `pyproject.toml`. Notable optional parameters include... - Coverage items. The coverage report indicates what percentage of the code was included in tests. - - `--cov=spyglatss`: Which package should be described in the coverage report + - `--cov=spyglass`: Which package should be described in the coverage report - `--cov-report term-missing`: Include lines of items missing in coverage - Verbosity. @@ -18,23 +38,24 @@ specified in `pyproject.toml`. Notable optional parameters include... - Data and database. - - `--no-server`: Default False, launch Docker container from python. When - True, no server is started and tests attempt to connect to existing - container. + - `--base_dir`: Default `./tests/test_data/`. Where to store downloaded and + created files. - `--no-teardown`: Default False. When True, docker database tables are preserved on exit. Set to false to inspect output items after testing. - - `--my-datadir ./rel-path/`: Default `./tests/test_data/`. Where to store - created files. + - `--no-docker`: Default False, launch Docker container from python. When + True, no server is started and tests attempt to connect to existing + container. For github actions, `--no-docker` is set to configure the + container class as null. + - `--no-dlc`: Default False. When True, skip data downloads for and tests of + features that require DeepLabCut. - Incremental running. 
- - `-m`: Run tests with the - [given marker](https://docs.pytest.org/en/6.2.x/usage.html#specifying-tests-selecting-tests) - (e.g., `pytest -m current`). - - `--sw`: Stepwise. Continue from previously failed test when starting again. - `-s`: No capture. By including `from IPython import embed; embed()` in a test, and using this flag, you can open an IPython environment from within a test + - `-v`: Verbose. List individual tests, report pass/fail. + - `--sw`: Stepwise. Continue from previously failed test when starting again. - `--pdb`: Enter debug mode if a test fails. - `tests/test_file.py -k test_name`: To run just a set of tests, specify the file name at the end of the command. To run a single test, further specify diff --git a/tests/common/test_behav.py b/tests/common/test_behav.py index 1f4767dfb..6f2daa690 100644 --- a/tests/common/test_behav.py +++ b/tests/common/test_behav.py @@ -79,22 +79,18 @@ def test_populate_state_script(common, pop_state_script): ), "StateScript populate unexpected effect" -@pytest.mark.skip(reason="No video files in mini") -def test_videofile_no_transaction(common, mini_restr): - """Test no transaction""" - common.VideoFile()._no_transaction_make(mini_restr) - - -@pytest.mark.skip(reason="No video files in mini") -def test_videofile_update_entries(common): +def test_videofile_update_entries(common, video_keys): """Test update entries""" - common.VideoFile().update_entries() + key = common.VideoFile().fetch(as_dict=True)[0] + common.VideoFile().update_entries(key) -@pytest.mark.skip(reason="No video files in mini") -def test_videofile_getabspath(common, mini_restr): +def test_videofile_getabspath(common, video_keys): """Test get absolute path""" - common.VideoFile().getabspath(mini_restr) + key = video_keys[0] + path = common.VideoFile().get_abs_path(key) + file_part = key["nwb_file_name"].split("2")[0] + "_0" + str(key["epoch"]) + assert file_part in path, "VideoFile get_abs_path failed" @pytest.mark.skipif(not TEARDOWN, reason="No teardown: expect no change.") diff --git a/tests/common/test_position.py b/tests/common/test_position.py index b10c0654b..bb74e213c 100644 --- a/tests/common/test_position.py +++ b/tests/common/test_position.py @@ -1,29 +1,31 @@ +import numpy as np +import pandas as pd import pytest -@pytest.fixture +@pytest.fixture(scope="session") def common_position(common): yield common.common_position -@pytest.fixture +@pytest.fixture(scope="session") def interval_position_info(common_position): yield common_position.IntervalPositionInfo -@pytest.fixture +@pytest.fixture(scope="session") def default_param_key(): yield {"position_info_param_name": "default"} -@pytest.fixture +@pytest.fixture(scope="session") def interval_key(common): yield (common.IntervalList & "interval_list_name LIKE 'pos 0%'").fetch1( "KEY" ) -@pytest.fixture +@pytest.fixture(scope="session") def param_table(common_position, default_param_key, teardown): param_table = common_position.PositionInfoParameters() param_table.insert1(default_param_key, skip_duplicates=True) @@ -32,7 +34,7 @@ def param_table(common_position, default_param_key, teardown): param_table.delete(safemode=False) -@pytest.fixture +@pytest.fixture(scope="session") def upsample_position( common, common_position, @@ -63,7 +65,7 @@ def upsample_position( (param_table & upsample_param_key).delete(safemode=False) -@pytest.fixture +@pytest.fixture(scope="session") def interval_pos_key(upsample_position): yield upsample_position @@ -72,7 +74,7 @@ def test_interval_position_info_insert(common_position, 
interval_pos_key): assert common_position.IntervalPositionInfo & interval_pos_key -@pytest.fixture +@pytest.fixture(scope="session") def upsample_position_error( upsample_position, default_param_key, @@ -147,6 +149,76 @@ def test_interval_position_info_kwarg_alias(interval_position_info): ), "IntervalPositionInfo._fix_kwargs() should alias old arg names." -@pytest.mark.skip(reason="Not testing with video data yet.") -def test_position_video(common_position): - pass +@pytest.fixture(scope="session") +def position_video(common_position): + yield common_position.PositionVideo() + + +def test_position_video(position_video, upsample_position): + _ = position_video.populate() + assert len(position_video) == 1, "Failed to populate PositionVideo table." + + +def test_convert_to_pixels(position_video): + + data = np.array([[2, 4], [6, 8]]) + expect = np.array([[1, 2], [3, 4]]) + output = position_video.convert_to_pixels(data, "junk", 2) + + assert np.array_equal(output, expect), "Failed to convert to pixels." + + +@pytest.fixture(scope="session") +def rename_default_cols(common_position): + yield common_position._fix_col_names, ["xloc", "yloc", "xloc2", "yloc2"] + + +@pytest.mark.parametrize( + "col_type, cols", + [ + ("DEFAULT_COLS", ["xloc", "yloc", "xloc2", "yloc2"]), + ("ONE_IDX_COLS", ["xloc1", "yloc1", "xloc2", "yloc2"]), + ("ZERO_IDX_COLS", ["xloc0", "yloc0", "xloc1", "yloc1"]), + ], +) +def test_rename_columns(rename_default_cols, col_type, cols): + + _fix_col_names, defaults = rename_default_cols + df = pd.DataFrame([range(len(cols) + 1)], columns=["junk"] + cols) + result = _fix_col_names(df).columns.tolist() + + assert result == defaults, f"_fix_col_names failed to rename {col_type}." + + +def test_rename_three_d(rename_default_cols): + _fix_col_names, _ = rename_default_cols + three_d = ["junk", "x", "y", "z"] + df = pd.DataFrame([range(4)], columns=three_d) + result = _fix_col_names(df).columns.tolist() + + assert ( + result == three_d[1:] + ), "_fix_col_names failed to rename THREE_D_COLS." + + +def test_rename_non_default_columns(monkeypatch, rename_default_cols): + _fix_col_names, defaults = rename_default_cols + df = pd.DataFrame([range(4)], columns=["a", "b", "c", "d"]) + + # Monkeypatch the input function + monkeypatch.setattr("builtins.input", lambda _: "yes") + result = _fix_col_names(df).columns.tolist() + + assert ( + result == defaults + ), "_fix_col_names failed to rename non-default cols." 
+ + +def test_rename_non_default_columns_err(monkeypatch, rename_default_cols): + _fix_col_names, defaults = rename_default_cols + df = pd.DataFrame([range(4)], columns=["a", "b", "c", "d"]) + + monkeypatch.setattr("builtins.input", lambda _: "no") + + with pytest.raises(ValueError): + _fix_col_names(df) diff --git a/tests/conftest.py b/tests/conftest.py index cd9350ff1..fe8ce1a5b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,7 +10,7 @@ import warnings from contextlib import nullcontext from pathlib import Path -from subprocess import Popen +from shutil import rmtree as shutil_rmtree from time import sleep as tsleep import datajoint as dj @@ -18,15 +18,22 @@ import pynwb import pytest from datajoint.logging import logger as dj_logger +from numba import NumbaWarning +from pandas.errors import PerformanceWarning from .container import DockerMySQLManager +from .data_downloader import DataDownloader warnings.filterwarnings("ignore", category=UserWarning, module="hdmf") +warnings.filterwarnings("ignore", module="tensorflow") +warnings.filterwarnings("ignore", category=FutureWarning, module="sklearn") +warnings.filterwarnings("ignore", category=PerformanceWarning, module="pandas") +warnings.filterwarnings("ignore", category=NumbaWarning, module="numba") # ------------------------------- TESTS CONFIG ------------------------------- # globals in pytest_configure: -# BASE_DIR, RAW_DIR, SERVER, TEARDOWN, VERBOSE, TEST_FILE, DOWNLOAD +# BASE_DIR, RAW_DIR, SERVER, TEARDOWN, VERBOSE, TEST_FILE, DOWNLOAD, NO_DLC def pytest_addoption(parser): @@ -39,10 +46,10 @@ def pytest_addoption(parser): Parameters ---------- --quiet-spy (bool): Default False. Allow print statements from Spyglass. + --base-dir (str): Default './tests/test_data/'. Dir for local input file. --no-teardown (bool): Default False. Delete pipeline on close. - --no-server (bool): Default False. Run datajoint server in Docker. - --datadir (str): Default './tests/test_data/'. Dir for local input file. - WARNING: not yet implemented. + --no-docker (bool): Default False. Run datajoint mysql server in Docker. + --no-dlc (bool): Default False. Skip DLC tests. Also skip video downloads. 
""" parser.addoption( "--quiet-spy", @@ -52,11 +59,11 @@ def pytest_addoption(parser): help="Quiet logging from Spyglass.", ) parser.addoption( - "--no-server", - action="store_true", - dest="no_server", - default=False, - help="Do not launch datajoint server in Docker.", + "--base-dir", + action="store", + default="./tests/_data/", + dest="base_dir", + help="Directory for local input file.", ) parser.addoption( "--no-teardown", @@ -66,20 +73,28 @@ def pytest_addoption(parser): help="Tear down tables after tests.", ) parser.addoption( - "--base-dir", - action="store", - default="./tests/_data/", - dest="base_dir", - help="Directory for local input file.", + "--no-docker", + action="store_true", + dest="no_docker", + default=False, + help="Do not launch datajoint server in Docker.", + ) + parser.addoption( + "--no-dlc", + action="store_true", + dest="no_dlc", + default=False, + help="Skip downloads for and tests of DLC-dependent features.", ) def pytest_configure(config): - global BASE_DIR, RAW_DIR, SERVER, TEARDOWN, VERBOSE, TEST_FILE, DOWNLOAD + global BASE_DIR, RAW_DIR, SERVER, TEARDOWN, VERBOSE, TEST_FILE, DOWNLOADS, NO_DLC TEST_FILE = "minirec20230622.nwb" TEARDOWN = not config.option.no_teardown VERBOSE = not config.option.quiet_spy + NO_DLC = config.option.no_dlc BASE_DIR = Path(config.option.base_dir).absolute() BASE_DIR.mkdir(parents=True, exist_ok=True) @@ -89,50 +104,16 @@ def pytest_configure(config): SERVER = DockerMySQLManager( restart=TEARDOWN, shutdown=TEARDOWN, - null_server=config.option.no_server, + null_server=config.option.no_docker, verbose=VERBOSE, ) - DOWNLOAD = download_data(verbose=VERBOSE) - -def data_is_downloaded(): - """Check if data is downloaded.""" - return os.path.exists(RAW_DIR / TEST_FILE) - - -def download_data(verbose=False): - """Download data from BOX using environment variable credentials. - - Note: In gh-actions, this is handled by the test-conda workflow. - """ - if data_is_downloaded(): - return None - UCSF_BOX_USER = os.environ.get("UCSF_BOX_USER") - UCSF_BOX_TOKEN = os.environ.get("UCSF_BOX_TOKEN") - if not all([UCSF_BOX_USER, UCSF_BOX_TOKEN]): - raise ValueError( - "Missing data, no credentials: UCSF_BOX_USER or UCSF_BOX_TOKEN." 
- ) - data_url = f"ftps://ftp.box.com/trodes_to_nwb_test_data/{TEST_FILE}" - - cmd = [ - "wget", - "--recursive", - "--no-host-directories", - "--no-directories", - "--user", - UCSF_BOX_USER, - "--password", - UCSF_BOX_TOKEN, - "-P", - RAW_DIR, - data_url, - ] - if not verbose: - cmd.insert(cmd.index("--recursive") + 1, "--no-verbose") - cmd_kwargs = dict(stdout=sys.stdout, stderr=sys.stderr) if verbose else {} - - return Popen(cmd, **cmd_kwargs) + DOWNLOADS = DataDownloader( + nwb_file_name=TEST_FILE, + base_dir=BASE_DIR, + verbose=VERBOSE, + download_dlc=not NO_DLC, + ) def pytest_unconfigure(config): @@ -231,10 +212,10 @@ def mini_path(raw_dir): path = raw_dir / TEST_FILE # wait for wget download to finish - if DOWNLOAD is not None: - DOWNLOAD.wait() + if (nwb_download := DOWNLOADS.file_downloads.get(TEST_FILE)) is not None: + nwb_download.wait() - # wait for gh-actions download to finish + # wait for download to finish timeout, wait, found = 60, 5, False for _ in range(timeout // wait): if path.exists(): @@ -248,6 +229,17 @@ def mini_path(raw_dir): yield path +@pytest.fixture(scope="session") +def nodlc(request): + yield NO_DLC + + +@pytest.fixture(scope="session") +def skipif_nodlc(request): + if NO_DLC: + yield pytest.mark.skip(reason="Skipping DLC-dependent tests.") + + @pytest.fixture(scope="session") def mini_copy_name(mini_path): from spyglass.utils.nwb_helper_fn import get_nwb_copy_filename # noqa: E402 @@ -324,7 +316,7 @@ def mini_insert( yield close_nwb_files() - # Note: no need to run deletes in teardown, bc removing the container + # Note: teardown will remove the container, deleting all data @pytest.fixture(scope="session") @@ -403,6 +395,19 @@ def populate_exception(): yield PopulateException +# -------------------------- FIXTURES, COMMON TABLES -------------------------- + + +@pytest.fixture(scope="session") +def video_keys(common, base_dir): + for file, download in DOWNLOADS.file_downloads.items(): + if file.endswith(".h264") and download is not None: + download.wait() # wait for videos to finish downloading + DOWNLOADS.rename_files() + + return common.VideoFile().fetch(as_dict=True) + + # ------------------------- FIXTURES, POSITION TABLES ------------------------- @@ -439,11 +444,11 @@ def trodes_params(trodes_params_table, teardown): "params": { **params, "is_upsampled": 1, - "upsampling_sampling_rate": 500, + "upsampling_sampling_rate": 500, # TODO - lower this to speed up }, }, } - trodes_params_table.get_default() + _ = trodes_params_table.get_default() trodes_params_table.insert( [v for k, v in paramsets.items()], skip_duplicates=True ) @@ -771,3 +776,453 @@ def lfp_merge_key(populate_lfp): @pytest.fixture(scope="session") def lfp_v1_key(lfp, lfp_s_key): yield (lfp.v1.LFPV1 & lfp_s_key).fetch1("KEY") + + +# --------------------------- FIXTURES, DLC TABLES ---------------------------- +# ---------------- Note: DLCOutput is used to test RestrGraph ----------------- + + +@pytest.fixture(scope="session") +def bodyparts(sgp): + bps = ["whiteLED", "tailBase", "tailMid", "tailTip"] + sgp.v1.BodyPart.insert( + [{"bodypart": bp, "bodypart_description": "none"} for bp in bps], + skip_duplicates=True, + ) + + yield bps + + +@pytest.fixture(scope="session") +def dlc_project_tbl(sgp): + yield sgp.v1.DLCProject() + + +@pytest.fixture(scope="session") +def dlc_project_name(): + yield "pytest_proj" + + +@pytest.fixture(scope="session") +def insert_project( + verbose_context, + teardown, + dlc_project_name, + dlc_project_tbl, + common, + bodyparts, + mini_copy_name, +): + if 
NO_DLC: + pytest.skip("Skipping DLC-dependent tests.") + + from deeplabcut.utils.auxiliaryfunctions import read_config, write_config + + team_name = "sc_eb" + common.LabTeam.insert1({"team_name": team_name}, skip_duplicates=True) + with verbose_context: + project_key = dlc_project_tbl.insert_new_project( + project_name=dlc_project_name, + bodyparts=bodyparts, + lab_team=team_name, + frames_per_video=100, + video_list=[ + {"nwb_file_name": mini_copy_name, "epoch": 0}, + {"nwb_file_name": mini_copy_name, "epoch": 1}, + ], + skip_duplicates=True, + ) + config_path = (dlc_project_tbl & project_key).fetch1("config_path") + cfg = read_config(config_path) + cfg.update( + { + "numframes2pick": 2, + "maxiters": 2, + "scorer": team_name, + "skeleton": [ + ["whiteLED"], + [ + ["tailMid", "tailMid"], + ["tailBase", "tailBase"], + ["tailTip", "tailTip"], + ], + ], # eb's has video_sets: {1: {'crop': [0, 1260, 0, 728]}} + } + ) + + write_config(config_path, cfg) + + yield project_key, cfg, config_path + + if teardown: + (dlc_project_tbl & project_key).delete(safemode=False) + shutil_rmtree(str(Path(config_path).parent)) + + +@pytest.fixture(scope="session") +def project_key(insert_project): + yield insert_project[0] + + +@pytest.fixture(scope="session") +def dlc_config(insert_project): + yield insert_project[1] + + +@pytest.fixture(scope="session") +def config_path(insert_project): + yield insert_project[2] + + +@pytest.fixture(scope="session") +def project_dir(config_path): + yield Path(config_path).parent + + +@pytest.fixture(scope="session") +def extract_frames( + verbose_context, dlc_project_tbl, project_key, dlc_config, project_dir +): + with verbose_context: + dlc_project_tbl.run_extract_frames( + project_key, userfeedback=False, mode="automatic" + ) + vid_name = list(dlc_config["video_sets"].keys())[0].split("/")[-1] + label_dir = project_dir / "labeled-data" / vid_name.split(".")[0] + + yield label_dir + + for file in label_dir.glob("*png"): + if file.stem in ["img000", "img001"]: + continue + file.unlink() + + +@pytest.fixture(scope="session") +def labeled_vid_dir(extract_frames): + yield extract_frames + + +@pytest.fixture(scope="session") +def fix_downloaded(labeled_vid_dir, project_dir): + """Grabs CollectedData and img files from project_dir, moves to labeled""" + for file in project_dir.parent.parent.glob("*"): + if file.is_dir(): + continue + dest = labeled_vid_dir / file.name + if dest.exists(): + dest.unlink() + dest.write_bytes(file.read_bytes()) + # TODO: revert to rename before merge + # file.rename(labeled_vid_dir / file.name) + + yield + + +@pytest.fixture(scope="session") +def add_training_files(dlc_project_tbl, project_key, fix_downloaded): + dlc_project_tbl.add_training_files(project_key, skip_duplicates=True) + yield + + +@pytest.fixture(scope="session") +def dlc_training_params(sgp): + params_tbl = sgp.v1.DLCModelTrainingParams() + params_name = "pytest" + yield params_tbl, params_name + + +@pytest.fixture(scope="session") +def training_params_key(verbose_context, sgp, project_key, dlc_training_params): + params_tbl, params_name = dlc_training_params + with verbose_context: + params_tbl.insert_new_params( + paramset_name=params_name, + params={ + "trainingsetindex": 0, + "shuffle": 1, + "gputouse": None, + "TFGPUinference": False, + "net_type": "resnet_50", + "augmenter_type": "imgaug", + "video_sets": "test skipping param", + }, + skip_duplicates=True, + ) + yield {"dlc_training_params_name": params_name} + + +@pytest.fixture(scope="session") +def model_train_key(sgp, 
project_key, training_params_key): + _ = project_key.pop("config_path", None) + model_train_key = { + **project_key, + **training_params_key, + } + sgp.v1.DLCModelTrainingSelection().insert1( + { + **model_train_key, + "model_prefix": "", + }, + skip_duplicates=True, + ) + yield model_train_key + + +@pytest.fixture(scope="session") +def populate_training(sgp, fix_downloaded, model_train_key, add_training_files): + train_tbl = sgp.v1.DLCModelTraining + if len(train_tbl & model_train_key) == 0: + _ = add_training_files + _ = fix_downloaded + sgp.v1.DLCModelTraining.populate(model_train_key) + yield model_train_key + + +@pytest.fixture(scope="session") +def model_source_key(sgp, model_train_key, populate_training): + yield (sgp.v1.DLCModelSource & model_train_key).fetch1("KEY") + + +@pytest.fixture(scope="session") +def model_key(sgp, model_source_key): + model_key = {**model_source_key, "dlc_model_params_name": "default"} + _ = sgp.v1.DLCModelParams.get_default() + sgp.v1.DLCModelSelection().insert1(model_key, skip_duplicates=True) + yield model_key + + +@pytest.fixture(scope="session") +def populate_model(sgp, model_key): + model_tbl = sgp.v1.DLCModel + if model_tbl & model_key: + yield + else: + sgp.v1.DLCModel.populate(model_key) + yield + + +@pytest.fixture(scope="session") +def pose_estimation_key(sgp, mini_copy_name, populate_model, model_key): + yield sgp.v1.DLCPoseEstimationSelection.insert_estimation_task( + { + "nwb_file_name": mini_copy_name, + "epoch": 1, + "video_file_num": 0, + **model_key, + }, + task_mode="trigger", # trigger or load + params={"gputouse": None, "videotype": "mp4", "TFGPUinference": False}, + ) + + +@pytest.fixture(scope="session") +def populate_pose_estimation(sgp, pose_estimation_key): + pose_est_tbl = sgp.v1.DLCPoseEstimation() + if len(pose_est_tbl & pose_estimation_key) < 1: + pose_est_tbl.populate(pose_estimation_key) + yield pose_est_tbl + + +@pytest.fixture(scope="session") +def si_params_name(sgp, populate_pose_estimation): + params_name = "low_bar" + params_tbl = sgp.v1.DLCSmoothInterpParams + # if len(params_tbl & {"dlc_si_params_name": params_name}) < 1: + if True: # TODO: remove before merge + nan_params = params_tbl.get_nan_params() + nan_params["dlc_si_params_name"] = params_name + nan_params["params"].update( + { + "likelihood_thresh": 0.4, + "max_cm_between_pts": 100, + "num_inds_to_span": 50, + # Smoothing and Interpolation added later - must check + "smoothing_params": {"smoothing_duration": 0.05}, + "interp_params": {"max_cm_to_interp": 100}, + } + ) + params_tbl.insert1(nan_params, skip_duplicates=True) + + yield params_name + + +@pytest.fixture(scope="session") +def si_key(sgp, bodyparts, si_params_name, pose_estimation_key): + key = { + key: val + for key, val in pose_estimation_key.items() + if key in sgp.v1.DLCSmoothInterpSelection.primary_key + } + sgp.v1.DLCSmoothInterpSelection.insert( + [ + { + **key, + "bodypart": bodypart, + "dlc_si_params_name": si_params_name, + } + for bodypart in bodyparts[:1] + ], + skip_duplicates=True, + ) + yield key + + +@pytest.fixture(scope="session") +def populate_si(sgp, si_key, populate_pose_estimation): + sgp.v1.DLCSmoothInterp.populate() + yield + + +@pytest.fixture(scope="session") +def cohort_selection(sgp, si_key, si_params_name): + cohort_key = { + k: v + for k, v in { + **si_key, + "dlc_si_cohort_selection_name": "whiteLED", + "bodyparts_params_dict": { + "whiteLED": si_params_name, + }, + }.items() + if k not in ["bodypart", "dlc_si_params_name"] + } + 
sgp.v1.DLCSmoothInterpCohortSelection().insert1( + cohort_key, skip_duplicates=True + ) + yield cohort_key + + +@pytest.fixture(scope="session") +def cohort_key(sgp, cohort_selection): + yield cohort_selection.copy() + + +@pytest.fixture(scope="session") +def populate_cohort(sgp, cohort_selection, populate_si): + sgp.v1.DLCSmoothInterpCohort.populate(cohort_selection) + + +@pytest.fixture(scope="session") +def centroid_params(sgp): + params_tbl = sgp.v1.DLCCentroidParams + params_key = {"dlc_centroid_params_name": "one_test"} + if len(params_tbl & params_key) == 0: + params_tbl.insert1( + { + **params_key, + "params": { + "centroid_method": "one_pt_centroid", + "points": {"point1": "whiteLED"}, + "interpolate": True, + "interp_params": {"max_cm_to_interp": 100}, + "smooth": True, + "smoothing_params": { + "smoothing_duration": 0.05, + "smooth_method": "moving_avg", + }, + "max_LED_separation": 50, + "speed_smoothing_std_dev": 0.100, + }, + } + ) + yield params_key + + +@pytest.fixture(scope="session") +def centroid_selection(sgp, cohort_key, populate_cohort, centroid_params): + centroid_key = cohort_key.copy() + centroid_key = { + key: val + for key, val in cohort_key.items() + if key in sgp.v1.DLCCentroidSelection.primary_key + } + centroid_key.update(centroid_params) + sgp.v1.DLCCentroidSelection.insert1(centroid_key, skip_duplicates=True) + yield centroid_key + + +@pytest.fixture(scope="session") +def centroid_key(sgp, centroid_selection): + yield centroid_selection.copy() + + +@pytest.fixture(scope="session") +def populate_centroid(sgp, centroid_selection): + sgp.v1.DLCCentroid.populate(centroid_selection) + + +@pytest.fixture(scope="session") +def orient_params(sgp): + params_tbl = sgp.v1.DLCOrientationParams + params_key = {"dlc_orientation_params_name": "none"} + if len(params_tbl & params_key) == 0: + params_tbl.insert1( + { + **params_key, + "params": { + "orient_method": "none", + "bodypart1": "whiteLED", + "orientation_smoothing_std_dev": 0.001, + }, + } + ) + return params_key + + +@pytest.fixture(scope="session") +def orient_selection(sgp, cohort_key, orient_params): + orient_key = { + key: val + for key, val in cohort_key.items() + if key in sgp.v1.DLCOrientationSelection.primary_key + } + orient_key.update(orient_params) + sgp.v1.DLCOrientationSelection().insert1(orient_key, skip_duplicates=True) + yield orient_key + + +@pytest.fixture(scope="session") +def orient_key(sgp, orient_selection): + yield orient_selection.copy() + + +@pytest.fixture(scope="session") +def populate_orient(sgp, orient_selection): + sgp.v1.DLCOrientation().populate(orient_selection) + yield + + +@pytest.fixture(scope="session") +def dlc_selection(sgp, centroid_key, orient_key, populate_orient): + dlc_key = { + key: val + for key, val in centroid_key.items() + if key in sgp.v1.DLCPosV1.primary_key + } + dlc_key.update( + { + "dlc_si_cohort_centroid": centroid_key[ + "dlc_si_cohort_selection_name" + ], + "dlc_si_cohort_orientation": orient_key[ + "dlc_si_cohort_selection_name" + ], + "dlc_orientation_params_name": orient_key[ + "dlc_orientation_params_name" + ], + } + ) + sgp.v1.DLCPosSelection().insert1(dlc_key, skip_duplicates=True) + yield dlc_key + + +@pytest.fixture(scope="session") +def dlc_key(sgp, dlc_selection): + yield dlc_selection.copy() + + +@pytest.fixture(scope="session") +def populate_dlc(sgp, dlc_key): + sgp.v1.DLCPosV1().populate(dlc_key) + yield diff --git a/tests/container.py b/tests/container.py index fa26f1c46..b9d77263e 100644 --- a/tests/container.py +++ b/tests/container.py @@ 
-46,7 +46,7 @@ def __init__( self.mysql_version = mysql_version self.container_name = container_name self.port = port or "330" + self.mysql_version[0] - self.client = docker.from_env() + self.client = None if null_server else docker.from_env() self.null_server = null_server self.password = "tutorial" self.user = "root" @@ -64,10 +64,14 @@ def __init__( @property def container(self) -> docker.models.containers.Container: + if self.null_server: + return self.container_name return self.client.containers.get(self.container_name) @property def container_status(self) -> str: + if self.null_server: + return None try: self.container.reload() return self.container.status @@ -76,6 +80,8 @@ def container_status(self) -> str: @property def container_health(self) -> str: + if self.null_server: + return None try: self.container.reload() return self.container.health @@ -125,7 +131,6 @@ def wait(self, timeout=120, wait=3) -> None: wait : int Time to wait between checks in seconds. Default 5. """ - if self.null_server: return None if not self.container_status or self.container_status == "exited": @@ -209,9 +214,10 @@ def stop(self, remove=True) -> None: if not self.container_status or self.container_status == "exited": return + container_name = self.container_name self.container.stop() - self.logger.info(f"Container {self.container_name} stopped.") + self.logger.info(f"Container {container_name} stopped.") if remove: self.container.remove() - self.logger.info(f"Container {self.container_name} removed.") + self.logger.info(f"Container {container_name} removed.") diff --git a/tests/data_downloader.py b/tests/data_downloader.py new file mode 100644 index 000000000..98a254eda --- /dev/null +++ b/tests/data_downloader.py @@ -0,0 +1,139 @@ +from functools import cached_property +from os import environ as os_environ +from pathlib import Path +from subprocess import DEVNULL, Popen +from sys import stderr, stdout +from typing import Dict, Union + +UCSF_BOX_USER = os_environ.get("UCSF_BOX_USER") +UCSF_BOX_TOKEN = os_environ.get("UCSF_BOX_TOKEN") +BASE_URL = "ftps://ftp.box.com/trodes_to_nwb_test_data/" + +NON_DLC = 3 # First N items below are not for DeepLabCut +FILE_PATHS = [ + { + "relative_dir": "raw", + "target_name": "minirec20230622.nwb", + "url": BASE_URL + "minirec20230622.nwb", + }, + { + "relative_dir": "video", + "target_name": "20230622_minirec_01_s1.1.h264", + "url": BASE_URL + "20230622_sample_01_a1/20230622_sample_01_a1.1.h264", + }, + { + "relative_dir": "video", + "target_name": "20230622_minirec_02_s2.1.h264", + "url": BASE_URL + "20230622_sample_02_a1/20230622_sample_02_a1.1.h264", + }, + { + "relative_dir": "deeplabcut", + "target_name": "CollectedData_sc_eb.csv", + "url": BASE_URL + "minirec_dlc_items/CollectedData_sc_eb.csv", + }, + { + "relative_dir": "deeplabcut", + "target_name": "CollectedData_sc_eb.h5", + "url": BASE_URL + "minirec_dlc_items/CollectedData_sc_eb.h5", + }, + { + "relative_dir": "deeplabcut", + "target_name": "img000.png", + "url": BASE_URL + "minirec_dlc_items/img000.png", + }, + { + "relative_dir": "deeplabcut", + "target_name": "img001.png", + "url": BASE_URL + "minirec_dlc_items/img001.png", + }, +] + + +class DataDownloader: + def __init__( + self, + nwb_file_name, + file_paths=FILE_PATHS, + base_dir=".", + download_dlc=True, + verbose=True, + ): + if not all([UCSF_BOX_USER, UCSF_BOX_TOKEN]): + raise ValueError( + "Missing os.environ credentials: UCSF_BOX_USER, UCSF_BOX_TOKEN." 
+ ) + if nwb_file_name != file_paths[0]["target_name"]: + raise ValueError( + f"Please adjust data_downloader.py to match: {nwb_file_name}" + ) + + self.cmd = [ + "wget", + "--recursive", + "--no-host-directories", + "--no-directories", + "--user", + UCSF_BOX_USER, + "--password", + UCSF_BOX_TOKEN, + "-P", # Then need relative path, then url + ] + + self.verbose = verbose + if not verbose: + self.cmd.insert(self.cmd.index("--recursive") + 1, "--no-verbose") + self.cmd_kwargs = dict(stdout=DEVNULL, stderr=DEVNULL) + else: + self.cmd_kwargs = dict(stdout=stdout, stderr=stderr) + + self.base_dir = Path(base_dir).resolve() + self.file_paths = file_paths if download_dlc else file_paths[:NON_DLC] + self.base_dir.mkdir(exist_ok=True) + + # Start downloads + _ = self.file_downloads + + def rename_files(self): + """Redund, but allows rerun later in startup process of conftest.""" + for path in self.file_paths: + target, url = path["target_name"], path["url"] + target_dir = self.base_dir / path["relative_dir"] + orig = target_dir / url.split("/")[-1] + dest = target_dir / target + + if orig.exists(): + orig.rename(dest) + + @cached_property # Only make list of processes once + def file_downloads(self) -> Dict[str, Union[Popen, None]]: + """{File: POpen/None} for each file. If exists/finished, None.""" + ret = dict() + self.rename_files() + for path in self.file_paths: + target, url = path["target_name"], path["url"] + target_dir = self.base_dir / path["relative_dir"] + dest = target_dir / target + + if dest.exists(): + ret[target] = None + continue + + target_dir.mkdir(exist_ok=True, parents=True) + ret[target] = Popen(self.cmd + [target_dir, url], **self.cmd_kwargs) + return ret + + def check_download(self, download, info): + if download is not None: + download.wait() + if download.returncode: + return download + return None + + @property + def download_errors(self): + ret = [] + for download, item in zip(self.file_downloads, self.file_paths): + if d_status := self.check_download(download, item): + ret.append(d_status) + continue + return ret diff --git a/tests/position/__init__.py b/tests/position/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/position/conftest.py b/tests/position/conftest.py new file mode 100644 index 000000000..c6c58d199 --- /dev/null +++ b/tests/position/conftest.py @@ -0,0 +1,92 @@ +""" +The following lines are not used in the course of regular pose processing and +can be removed so long as other functionality is not impacted. 
+ +position_merge.py: 106-107, 110-123, 139-262 +dlc_decorators.py: 11, 16-18, 22 +dlc_reader.py : + 24, 38, 44-45, 51, 57-58, 61, 70, 74, 80-81, 135-137, 146, 149-162, 214, + 218 +dlc_utils.py : + 58, 61, 69, 72, 97-100, 104, 149-161, 232-235, 239-241, 246, 259, 280, + 293-305, 310-316, 328-341, 356-373, 395, 404, 480, 487-488, 530, 548-561, + 594-601, 611-612, 641-657, 682-736, 762-772, 787, 809-1286 +""" + +from itertools import product as iter_product + +import numpy as np +import pandas as pd +import pytest + + +@pytest.fixture(scope="session") +def dlc_video_params(sgp): + sgp.v1.DLCPosVideoParams.insert_default() + params_key = {"dlc_pos_video_params_name": "five_percent"} + sgp.v1.DLCPosVideoParams.insert1( + { + **params_key, + "params": { + "percent_frames": 0.05, + "incl_likelihood": True, + }, + }, + skip_duplicates=True, + ) + yield params_key + + +@pytest.fixture(scope="session") +def dlc_video_selection(sgp, dlc_key, dlc_video_params, populate_dlc): + s_key = {**dlc_key, **dlc_video_params} + sgp.v1.DLCPosVideoSelection.insert1(s_key, skip_duplicates=True) + yield dlc_key + + +@pytest.fixture(scope="session") +def populate_dlc_video(sgp, dlc_video_selection): + sgp.v1.DLCPosVideo.populate(dlc_video_selection) + yield sgp.v1.DLCPosVideo() + + +@pytest.fixture(scope="session") +def populate_evaluation(sgp, populate_model): + sgp.v1.DLCEvaluation.populate() + yield + + +def generate_led_df(leds, inc_vals=False): + """Returns df with all combinations of 1 and np.nan for each led. + + If inc_vals is True, the values will be incremented by 1 for each non-nan""" + all_vals = list(zip(*iter_product([1, np.nan], repeat=len(leds)))) + n_rows = len(all_vals[0]) + indices = np.random.uniform(1.6223e09, 1.6224e09, n_rows) + + data = dict() + for led, values in zip(leds, all_vals): + data.update( + { + (led, "video_frame_id"): { + i: f for i, f in zip(indices, range(n_rows + 1)) + }, + (led, "x"): {i: v for i, v in zip(indices, values)}, + (led, "y"): {i: v for i, v in zip(indices, values)}, + } + ) + df = pd.DataFrame(data) + + if not inc_vals: + return df + + count = [0] + + def increment_count(): + count[0] += 1 + return count[0] + + def process_value(x): + return increment_count() if x == 1 else x + + return df.applymap(process_value) diff --git a/tests/position/test_dlc_cent.py b/tests/position/test_dlc_cent.py new file mode 100644 index 000000000..a3675b2ae --- /dev/null +++ b/tests/position/test_dlc_cent.py @@ -0,0 +1,63 @@ +import numpy as np +import pytest + +from .conftest import generate_led_df + + +@pytest.fixture(scope="session") +def centroid_df(sgp, centroid_key, populate_centroid): + yield (sgp.v1.DLCCentroid & centroid_key).fetch1_dataframe() + + +def test_centroid_fetch1_dataframe(centroid_df): + df_cols = centroid_df.columns + exp_cols = [ + "video_frame_ind", + "position_x", + "position_y", + "velocity_x", + "velocity_y", + "speed", + ] + + assert all( + e in df_cols for e in exp_cols + ), f"Unexpected cols in position merge dataframe: {df_cols}" + + +@pytest.fixture(scope="session") +def params_tbl(sgp): + yield sgp.v1.DLCCentroidParams() + + +def test_insert_default_params(params_tbl): + ret = params_tbl.get_default() + assert "default" in params_tbl.fetch( + "dlc_centroid_params_name" + ), "Default params not inserted" + assert ( + ret["dlc_centroid_params_name"] == "default" + ), "Default params not inserted" + + +def test_validate_params(params_tbl): + params = params_tbl.get_default() + params["dlc_centroid_params_name"] = "other test" + 
params_tbl.insert1(params, skip_duplicates=True) + + +@pytest.mark.parametrize( + "key", ["four_led_centroid", "two_pt_centroid", "one_pt_centroid"] +) +def test_centroid_calcs(key, sgp): + points = sgp.v1.position_dlc_centroid._key_to_points[key] + func = sgp.v1.position_dlc_centroid._key_to_func_dict[key] + + df = generate_led_df(points) + ret = func(df, max_LED_separation=100, points={p: p for p in points}) + + assert np.all(ret[:-1] == 1), f"Centroid calculation failed for {key}" + assert np.all(np.isnan(ret[-1])), f"Centroid calculation failed for {key}" + + with pytest.raises(KeyError): + func(df) # Missing led separation/point names diff --git a/tests/position/test_dlc_model.py b/tests/position/test_dlc_model.py new file mode 100644 index 000000000..6f1ccf89d --- /dev/null +++ b/tests/position/test_dlc_model.py @@ -0,0 +1,18 @@ +import pytest + + +def test_model_params_default(sgp): + assert sgp.v1.DLCModelParams.get_default() == { + "dlc_model_params_name": "default", + "params": { + "params": {}, + "shuffle": 1, + "trainingsetindex": 0, + "model_prefix": "", + }, + } + + +def test_model_input_assert(sgp): + with pytest.raises(AssertionError): + sgp.v1.DLCModelInput().insert1({"config_path": "/fake/path/"}) diff --git a/tests/position/test_dlc_orient.py b/tests/position/test_dlc_orient.py new file mode 100644 index 000000000..826df4cf9 --- /dev/null +++ b/tests/position/test_dlc_orient.py @@ -0,0 +1,45 @@ +import numpy as np +import pandas as pd +import pytest + +from .conftest import generate_led_df + + +def test_insert_params(sgp): + params_name = "test_params" + params_key = {"dlc_orientation_params_name": params_name} + params_tbl = sgp.v1.DLCOrientationParams() + params_tbl.insert_params( + params_name=params_name, params={}, skip_duplicates=True + ) + assert params_tbl & params_key, "Failed to insert params" + + defaults = params_tbl.get_default() + assert ( + defaults.get("params", {}).get("bodypart1") == "greenLED" + ), "Failed to insert default params" + + +def test_orient_fetch1_dataframe(sgp, orient_key, populate_orient): + """Fetches dataframe, but example data has one led, no orientation""" + fetched_df = (sgp.v1.DLCOrientation & orient_key).fetch1_dataframe() + assert isinstance(fetched_df, pd.DataFrame) + + +@pytest.mark.parametrize( + "key, points, exp_sum", + [ + ("none", ["none"], 0.0), + ("red_green_orientation", ["bodypart1", "bodypart2"], -2.356), + ("red_led_bisector", ["led1", "led2", "led3"], -1.571), + ], +) +def test_orient_calcs(sgp, key, points, exp_sum): + func = sgp.v1.position_dlc_orient._key_to_func_dict[key] + + df = generate_led_df(points, inc_vals=True) + df_sum = np.nansum(func(df, **{p: p for p in points})) + + assert np.isclose( + df_sum, exp_sum, atol=0.001 + ), f"Failed to calculate orient via {key}" diff --git a/tests/position/test_dlc_pos_est.py b/tests/position/test_dlc_pos_est.py new file mode 100644 index 000000000..fdf055843 --- /dev/null +++ b/tests/position/test_dlc_pos_est.py @@ -0,0 +1,36 @@ +import pytest + + +@pytest.fixture(scope="session") +def pos_est_sel(sgp): + yield sgp.v1.position_dlc_pose_estimation.DLCPoseEstimationSelection() + + +@pytest.mark.usefixtures("skipif_nodlc") +def test_rename_non_default_columns(sgp, common, pos_est_sel, video_keys): + vid_path, vid_name, _, _ = sgp.v1.dlc_utils.get_video_path(video_keys[0]) + + input = "0, 10, 0, 1000" + output = pos_est_sel.get_video_crop(vid_path + vid_name, input) + expected = [0, 10, 0, 1000] + + assert ( + output == expected + ), 
f"{pos_est_sel.table_name}.get_video_crop did not return expected output" + + +def test_invalid_video(pos_est_sel, pose_estimation_key): + _ = pose_estimation_key # Ensure populated + example_key = pos_est_sel.fetch("KEY", as_dict=True)[0] + example_key["nwb_file_name"] = "invalid.nwb" + with pytest.raises(FileNotFoundError): + pos_est_sel.insert_estimation_task(example_key) + + +def test_pose_est_dataframe(populate_pose_estimation): + pose_cols = populate_pose_estimation.fetch_dataframe().columns + + for bp in ["tailBase", "tailMid", "tailTip"]: + for val in ["video_frame_ind", "x", "y"]: + col = (bp, val) + assert col in pose_cols, f"PoseEstimation df missing column {col}." diff --git a/tests/position/test_dlc_position.py b/tests/position/test_dlc_position.py new file mode 100644 index 000000000..94646f315 --- /dev/null +++ b/tests/position/test_dlc_position.py @@ -0,0 +1,64 @@ +import pytest + + +@pytest.fixture(scope="session") +def si_params_tbl(sgp): + yield sgp.v1.DLCSmoothInterpParams() + + +def test_si_params_default(si_params_tbl): + assert si_params_tbl.get_default() == { + "dlc_si_params_name": "default", + "params": { + "interp_params": {"max_cm_to_interp": 15}, + "interpolate": True, + "likelihood_thresh": 0.95, + "max_cm_between_pts": 20, + "num_inds_to_span": 20, + "smooth": True, + "smoothing_params": { + "smooth_method": "moving_avg", + "smoothing_duration": 0.05, + }, + }, + } + assert si_params_tbl.get_nan_params() == { + "dlc_si_params_name": "just_nan", + "params": { + "interpolate": False, + "likelihood_thresh": 0.95, + "max_cm_between_pts": 20, + "num_inds_to_span": 20, + "smooth": False, + }, + } + assert list(si_params_tbl.get_available_methods()) == [ + "moving_avg" + ], f"{si_params_tbl.table_name}: unexpected available methods" + + +def test_invalid_params_insert(si_params_tbl): + with pytest.raises(KeyError): + si_params_tbl.insert1({"params": "invalid"}) + + +@pytest.fixture(scope="session") +def si_df(sgp, si_key, populate_si, bodyparts): + yield ( + sgp.v1.DLCSmoothInterp() & {**si_key, "bodypart": bodyparts[0]} + ).fetch1_dataframe() + + +def test_cohort_fetch1_dataframe(si_df): + df_cols = si_df.columns + exp_cols = ["video_frame_ind", "x", "y"] + assert all( + e in df_cols for e in exp_cols + ), f"Unexpected cols in DLCSmoothInterp dataframe: {df_cols}" + + +def test_all_nans(populate_pose_estimation, sgp): + pose_est_tbl = populate_pose_estimation + df = pose_est_tbl.BodyPart().fetch1_dataframe() + with pytest.raises(ValueError): + sgp.v1.position_dlc_position.nan_inds(df, 10, 0.99, 10) diff --git a/tests/position/test_dlc_proj.py b/tests/position/test_dlc_proj.py new file mode 100644 index 000000000..7eaba196d --- /dev/null +++ b/tests/position/test_dlc_proj.py @@ -0,0 +1,68 @@ +import pytest + + +def test_bp_insert(sgp): + bp_tbl = sgp.v1.position_dlc_project.BodyPart() + + bp_w_desc, desc = "test_bp", "test_desc" + bp_no_desc = "test_bp_no_desc" + + bp_tbl.add_from_config([bp_w_desc], [desc]) + bp_tbl.add_from_config([bp_no_desc]) + + assert bp_tbl & { + "bodypart": bp_w_desc, + "description": desc, + }, "Bodypart with description not inserted correctly" + assert bp_tbl & { + "bodypart": bp_no_desc, + "description": bp_no_desc, + }, "Bodypart without description not inserted correctly" + + +def test_project_insert(dlc_project_tbl, project_key): + assert dlc_project_tbl & project_key, "Project not inserted correctly" + + +@pytest.fixture +def new_project_key(): + return { + "project_name": "test_project_name", + "bodyparts": ["bp1"], + "lab_team": 
"any", + "frames_per_video": 1, + "video_list": ["any"], + "groupname": "fake group", + } + + +def test_failed_name_insert( + dlc_project_tbl, dlc_project_name, config_path, new_project_key +): + new_project_key.update({"project_name": dlc_project_name}) + existing_key = dlc_project_tbl.insert_new_project( + project_name=dlc_project_name, + bodyparts=["bp1"], + lab_team="any", + frames_per_video=1, + video_list=["any"], + groupname="any", + ) + expected_key = { + "project_name": dlc_project_name, + "config_path": config_path, + } + assert ( + existing_key == expected_key + ), "Project re-insert did not return expected key" + + +def test_failed_group_insert(dlc_project_tbl, new_project_key): + with pytest.raises(ValueError): + dlc_project_tbl.insert_new_project(**new_project_key) + + +def test_extract_frames(extract_frames, labeled_vid_dir): + extracted_files = list(labeled_vid_dir.glob("*.png")) + stems = set([f.stem for f in extracted_files]) - {"img000", "img001"} + assert len(stems) == 2, "Incorrect number of frames extracted" diff --git a/tests/position/test_dlc_sel.py b/tests/position/test_dlc_sel.py new file mode 100644 index 000000000..35b33fe06 --- /dev/null +++ b/tests/position/test_dlc_sel.py @@ -0,0 +1,17 @@ +def test_dlcvideo_default(sgp): + expected_default = { + "dlc_pos_video_params_name": "default", + "params": { + "incl_likelihood": True, + "percent_frames": 1, + "video_params": {"arrow_radius": 20, "circle_radius": 6}, + }, + } + + # run twice to trigger fetch existing + assert sgp.v1.DLCPosVideoParams.get_default() == expected_default + assert sgp.v1.DLCPosVideoParams.get_default() == expected_default + + +def test_dlc_video_populate(populate_dlc_video): + assert len(populate_dlc_video) > 0, "DLCPosVideo table is empty" diff --git a/tests/position/test_dlc_train.py b/tests/position/test_dlc_train.py new file mode 100644 index 000000000..eefa26f66 --- /dev/null +++ b/tests/position/test_dlc_train.py @@ -0,0 +1,37 @@ +import pytest + + +def test_existing_params( + verbose_context, dlc_training_params, training_params_key +): + params_tbl, params_name = dlc_training_params + + _ = training_params_key # Ensure populated + params_query = params_tbl & {"dlc_training_params_name": params_name} + assert params_query, "Existing params not found" + + with verbose_context: + params_tbl.insert_new_params( + paramset_name=params_name, + params={ + "shuffle": 1, + "trainingsetindex": 0, + "net_type": "any", + "gputouse": None, + }, + skip_duplicates=False, + ) + + assert len(params_query) == 1, "Existing params duplicated" + + +@pytest.mark.usefixtures("skipif_nodlc") +def test_get_params(nodlc, verbose_context, dlc_training_params): + if nodlc: # Decorator wasn't working here, so duplicate skipif + pytest.skip(reason="Skipping DLC-dependent tests.") + + params_tbl, _ = dlc_training_params + with verbose_context: + accepted_params = params_tbl.get_accepted_params() + + assert accepted_params is not None, "Failed to get accepted params" diff --git a/tests/position/test_pos_merge.py b/tests/position/test_pos_merge.py new file mode 100644 index 000000000..047129cd5 --- /dev/null +++ b/tests/position/test_pos_merge.py @@ -0,0 +1,24 @@ +import pytest + + +@pytest.fixture(scope="session") +def merge_df(sgp, pos_merge, dlc_key, populate_dlc): + merge_key = (pos_merge.DLCPosV1 & dlc_key).fetch1("KEY") + yield (pos_merge & merge_key).fetch1_dataframe() + + +def test_merge_dlc_fetch1_dataframe(merge_df): + df_cols = merge_df.columns + exp_cols = [ + "video_frame_ind", + "position_x", + 
"position_y", + "orientation", + "velocity_x", + "velocity_y", + "speed", + ] + + assert all( + e in df_cols for e in exp_cols + ), f"Unexpected cols in position merge dataframe: {df_cols}" diff --git a/tests/position/test_trodes.py b/tests/position/test_trodes.py index d4bc617f6..92fdfeeb1 100644 --- a/tests/position/test_trodes.py +++ b/tests/position/test_trodes.py @@ -59,3 +59,9 @@ def test_fetch_df(trodes_pos_v1, trodes_params): ) hash_exp = "5296e74dea2e5e68d39f81bc81723a12" assert hash_df == hash_exp, "Dataframe differs from expected" + + +def test_trodes_video(sgp): + vid_tbl = sgp.v1.TrodesPosVideo() + _ = vid_tbl.populate() + assert len(vid_tbl) == 2, "Failed to populate TrodesPosVideo" diff --git a/tests/utils/test_db_settings.py b/tests/utils/test_db_settings.py index 1c3efbead..3b72ec885 100644 --- a/tests/utils/test_db_settings.py +++ b/tests/utils/test_db_settings.py @@ -7,12 +7,16 @@ def db_settings(user_name): from spyglass.utils.database_settings import DatabaseSettings + id = getattr(docker_server.container, "id", None) + no_docker = id is None # If 'None', we're --no-docker in gh actions + return DatabaseSettings( user_name=user_name, host_name=docker_server.creds["database.host"], - target_database=docker_server.container.id, + target_database=id, exec_user=docker_server.creds["database.user"], exec_pass=docker_server.creds["database.password"], + test_mode=no_docker, ) diff --git a/tests/utils/test_mixin.py b/tests/utils/test_mixin.py index 010abf03c..5b6beb4d0 100644 --- a/tests/utils/test_mixin.py +++ b/tests/utils/test_mixin.py @@ -41,15 +41,19 @@ def test_merge_detect(Nwbfile, pos_merge_tables): ), "Merges not detected by mixin." -def test_merge_chain_join(Nwbfile, pos_merge_tables, lin_v1, lfp_merge_key): - """Test that the mixin can join merge chains.""" - _ = lin_v1, lfp_merge_key # merge tables populated +def test_merge_chain_join( + Nwbfile, pos_merge_tables, lin_v1, lfp_merge_key, populate_dlc +): + """Test that the mixin can join merge chains. + + NOTE: This will change if more data is added to merge tables.""" + _ = lin_v1, lfp_merge_key, populate_dlc # merge tables populated all_chains = [ chains.cascade(True, direction="down") for chains in Nwbfile._merge_chains.values() ] - end_len = [len(chain[0]) for chain in all_chains if chain] + end_len = [len(chain) for chain in all_chains] assert sum(end_len) == 4, "Merge chains not joined correctly." 
From 00ce11836fce8b55e4552330c1f374718a4d193c Mon Sep 17 00:00:00 2001 From: Kyu Hyun Lee Date: Wed, 29 May 2024 10:54:23 -0700 Subject: [PATCH 44/60] Don't insert lab member when creating lab team (#983) * Save LFP as pynwb.ecephys.LFP * Fix formatting * Fix formatting * Don't reinsert name * Recompose name * Fix second referece to * Update CHANGELOG.md --------- Co-authored-by: Eric Denovellis Co-authored-by: CBroz1 Co-authored-by: Eric Denovellis --- CHANGELOG.md | 2 ++ src/spyglass/common/common_lab.py | 13 +++++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 565b0c301..03ec80e5b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,8 @@ - Allow dlc without pre-existing tracking data #973, #975 - Raise `KeyError` for missing input parameters across helper funcs #966 - `DLCPosVideo` table now inserts into self after `make` #966 +- Common + - Don't insert lab member when creating lab team #983 ## [0.5.2] (April 22, 2024) diff --git a/src/spyglass/common/common_lab.py b/src/spyglass/common/common_lab.py index c5a6fbc00..de2ad8079 100644 --- a/src/spyglass/common/common_lab.py +++ b/src/spyglass/common/common_lab.py @@ -64,9 +64,9 @@ def insert_from_nwbfile(cls, nwbf): # each person is by default the member of their own LabTeam # (same as their name) - full_name, _, _ = decompose_name(experimenter) + full_name, first, last = decompose_name(experimenter) LabTeam.create_new_team( - team_name=full_name, team_members=[full_name] + team_name=full_name, team_members=[f"{last}, {first}"] ) @classmethod @@ -193,9 +193,10 @@ def create_new_team( member_list = [] for team_member in team_members: LabMember.insert_from_name(team_member) - query = ( - LabMember.LabMemberInfo() & {"lab_member_name": team_member} - ).fetch("google_user_name") + member_dict = {"lab_member_name": decompose_name(team_member)[0]} + query = (LabMember.LabMemberInfo() & member_dict).fetch( + "google_user_name" + ) if not query: logger.info( f"Please add the Google user ID for {team_member} in " @@ -203,7 +204,7 @@ def create_new_team( ) labteammember_dict = { "team_name": team_name, - "lab_member_name": team_member, + **member_dict, } member_list.append(labteammember_dict) # clear cache for this member From 04ec37a77a8b8873667784fa84d19a4ede81369a Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Mon, 3 Jun 2024 13:47:07 -0700 Subject: [PATCH 45/60] Add ability to set smoothing sigma in get_firing_rate (#994) * add option to set spike smoothing sigma * update changelog --- CHANGELOG.md | 2 ++ src/spyglass/spikesorting/analysis/v1/group.py | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 03ec80e5b..c9362fe9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,8 @@ - `DLCPosVideo` table now inserts into self after `make` #966 - Common - Don't insert lab member when creating lab team #983 +- Spikesorting + - Allow user to set smoothing timescale in `SortedSpikesGroup.get_firing_rate` #994 ## [0.5.2] (April 22, 2024) diff --git a/src/spyglass/spikesorting/analysis/v1/group.py b/src/spyglass/spikesorting/analysis/v1/group.py index 1f20a4e11..77a3fad41 100644 --- a/src/spyglass/spikesorting/analysis/v1/group.py +++ b/src/spyglass/spikesorting/analysis/v1/group.py @@ -189,7 +189,11 @@ def get_spike_indicator(cls, key: dict, time: np.ndarray) -> np.ndarray: @classmethod def get_firing_rate( - cls, key: dict, time: np.ndarray, multiunit: bool = False + cls, + key: dict, + time: np.ndarray, + multiunit: bool 
= False, + smoothing_sigma: float = 0.015, ) -> np.ndarray: spike_indicator = cls.get_spike_indicator(key, time) if spike_indicator.ndim == 1: @@ -202,7 +206,9 @@ def get_firing_rate( return np.stack( [ get_multiunit_population_firing_rate( - indicator[:, np.newaxis], sampling_frequency + indicator[:, np.newaxis], + sampling_frequency, + smoothing_sigma, ) for indicator in spike_indicator.T ], From 6b49c2dea072a9ba32db93ace3e23911f1db9b77 Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Tue, 4 Jun 2024 15:06:21 -0700 Subject: [PATCH 46/60] Add docstrings to SortedSpikesGroup and Decoding methods (#996) * Add docstrings * update changelog * fix spelling --------- Co-authored-by: Samuel Bray --- CHANGELOG.md | 1 + src/spyglass/decoding/v1/clusterless.py | 112 +++++++++++++++++- src/spyglass/decoding/v1/sorted_spikes.py | 95 ++++++++++++++- .../spikesorting/analysis/v1/group.py | 57 ++++++++- 4 files changed, 262 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c9362fe9e..3e8d9a448 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ - Don't insert lab member when creating lab team #983 - Spikesorting - Allow user to set smoothing timescale in `SortedSpikesGroup.get_firing_rate` #994 + - Update docstrings #996 ## [0.5.2] (April 22, 2024) diff --git a/src/spyglass/decoding/v1/clusterless.py b/src/spyglass/decoding/v1/clusterless.py index d7ecc5ec2..9dd601651 100644 --- a/src/spyglass/decoding/v1/clusterless.py +++ b/src/spyglass/decoding/v1/clusterless.py @@ -274,7 +274,14 @@ def make(self, key): DecodingOutput.insert1(orig_key, skip_duplicates=True) - def fetch_results(self): + def fetch_results(self) -> xr.Dataset: + """Retrieve the decoding results + + Returns + ------- + xr.Dataset + The decoding results (posteriors, etc.) 
+ """ return ClusterlessDetector.load_results(self.fetch1("results_path")) def fetch_model(self): @@ -282,6 +289,18 @@ def fetch_model(self): @staticmethod def fetch_environments(key): + """Fetch the environments for the decoding model + + Parameters + ---------- + key : dict + The decoding selection key + + Returns + ------- + List[TrackGraph] + list of track graphs in the trained model + """ model_params = ( DecodingParameters & {"decoding_param_name": key["decoding_param_name"]} @@ -309,6 +328,18 @@ def fetch_environments(key): @staticmethod def _get_interval_range(key): + """Get the maximum range of model times in the encoding and decoding intervals + + Parameters + ---------- + key : dict + The decoding selection key + + Returns + ------- + Tuple[float, float] + The minimum and maximum times for the model + """ encoding_interval = ( IntervalList & { @@ -338,6 +369,18 @@ def _get_interval_range(key): @staticmethod def fetch_position_info(key): + """Fetch the position information for the decoding model + + Parameters + ---------- + key : dict + The decoding selection key + + Returns + ------- + Tuple[pd.DataFrame, List[str]] + The position information and the names of the position variables + """ position_group_key = { "position_group_name": key["position_group_name"], "nwb_file_name": key["nwb_file_name"], @@ -363,6 +406,18 @@ def fetch_position_info(key): @staticmethod def fetch_linear_position_info(key): + """Fetch the position information and project it onto the track graph + + Parameters + ---------- + key : dict + The decoding selection key + + Returns + ------- + pd.DataFrame + The linearized position information + """ environment = ClusterlessDecodingV1.fetch_environments(key)[0] position_df = ClusterlessDecodingV1.fetch_position_info(key)[0] @@ -391,6 +446,22 @@ def fetch_linear_position_info(key): @staticmethod def fetch_spike_data(key, filter_by_interval=True): + """Fetch the spike times for the decoding model + + Parameters + ---------- + key : dict + The decoding selection key + filter_by_interval : bool, optional + Whether to filter for spike times in the model interval, by default True + time_slice : Slice, optional + User provided slice of time to restrict spikes to, by default None + + Returns + ------- + list[np.ndarray] + List of spike times for each unit in the model's spike group + """ waveform_keys = ( ( UnitWaveformFeaturesGroup.UnitFeatures @@ -426,6 +497,20 @@ def fetch_spike_data(key, filter_by_interval=True): @classmethod def get_spike_indicator(cls, key, time): + """get spike indicator matrix for the group + + Parameters + ---------- + key : dict + key to identify the group + time : np.ndarray + time vector for which to calculate the spike indicator matrix + + Returns + ------- + np.ndarray + spike indicator matrix with shape (len(time), n_units) + """ time = np.asarray(time) min_time, max_time = time[[0, -1]] spike_times = cls.fetch_spike_data(key)[0] @@ -442,6 +527,24 @@ def get_spike_indicator(cls, key, time): @classmethod def get_firing_rate(cls, key, time, multiunit=False): + """get time-dependent firing rate for units in the group + + Parameters + ---------- + key : dict + key to identify the group + time : np.ndarray + time vector for which to calculate the firing rate + multiunit : bool, optional + if True, return the multiunit firing rate for units in the group, by default False + smoothing_sigma : float, optional + standard deviation of gaussian filter to smooth firing rates in seconds, by default 0.015 + + Returns + ------- + np.ndarray + 
_description_ + """ spike_indicator = cls.get_spike_indicator(key, time) if spike_indicator.ndim == 1: spike_indicator = spike_indicator[:, np.newaxis] @@ -461,6 +564,13 @@ def get_firing_rate(cls, key, time, multiunit=False): ) def get_ahead_behind_distance(self): + """get the ahead-behind distance for the decoding model + + Returns + ------- + distance_metrics : np.ndarray + Information about the distance of the animal to the mental position. + """ # TODO: allow specification of specific time interval # TODO: allow specification of track graph # TODO: Handle decode intervals, store in table diff --git a/src/spyglass/decoding/v1/sorted_spikes.py b/src/spyglass/decoding/v1/sorted_spikes.py index c36959a00..310b6ca43 100644 --- a/src/spyglass/decoding/v1/sorted_spikes.py +++ b/src/spyglass/decoding/v1/sorted_spikes.py @@ -238,7 +238,14 @@ def make(self, key): DecodingOutput.insert1(orig_key, skip_duplicates=True) - def fetch_results(self): + def fetch_results(self) -> xr.Dataset: + """Retrieve the decoding results + + Returns + ------- + xr.Dataset + The decoding results (posteriors, etc.) + """ return SortedSpikesDetector.load_results(self.fetch1("results_path")) def fetch_model(self): @@ -246,6 +253,18 @@ def fetch_model(self): @staticmethod def fetch_environments(key): + """Fetch the environments for the decoding model + + Parameters + ---------- + key : dict + The decoding selection key + + Returns + ------- + List[TrackGraph] + list of track graphs in the trained model + """ model_params = ( DecodingParameters & {"decoding_param_name": key["decoding_param_name"]} @@ -273,6 +292,18 @@ def fetch_environments(key): @staticmethod def _get_interval_range(key): + """Get the maximum range of model times in the encoding and decoding intervals + + Parameters + ---------- + key : dict + The decoding selection key + + Returns + ------- + Tuple[float, float] + The minimum and maximum times for the model + """ encoding_interval = ( IntervalList & { @@ -302,6 +333,18 @@ def _get_interval_range(key): @staticmethod def fetch_position_info(key): + """Fetch the position information for the decoding model + + Parameters + ---------- + key : dict + The decoding selection key + + Returns + ------- + Tuple[pd.DataFrame, List[str]] + The position information and the names of the position variables + """ position_group_key = { "position_group_name": key["position_group_name"], "nwb_file_name": key["nwb_file_name"], @@ -326,6 +369,18 @@ def fetch_position_info(key): @staticmethod def fetch_linear_position_info(key): + """Fetch the position information and project it onto the track graph + + Parameters + ---------- + key : dict + The decoding selection key + + Returns + ------- + pd.DataFrame + The linearized position information + """ environment = SortedSpikesDecodingV1.fetch_environments(key)[0] position_df = SortedSpikesDecodingV1.fetch_position_info(key)[0] @@ -352,6 +407,22 @@ def fetch_linear_position_info(key): @staticmethod def fetch_spike_data(key, filter_by_interval=True, time_slice=None): + """Fetch the spike times for the decoding model + + Parameters + ---------- + key : dict + The decoding selection key + filter_by_interval : bool, optional + Whether to filter for spike times in the model interval, by default True + time_slice : Slice, optional + User provided slice of time to restrict spikes to, by default None + + Returns + ------- + list[np.ndarray] + List of spike times for each unit in the model's spike group + """ spike_times = SortedSpikesGroup.fetch_spike_data(key) if not 
filter_by_interval: return spike_times @@ -371,6 +442,13 @@ def fetch_spike_data(key, filter_by_interval=True, time_slice=None): return new_spike_times def spike_times_sorted_by_place_field_peak(self, time_slice=None): + """Spike times of units sorted by place field peak location + + Parameters + ---------- + time_slice : Slice, optional + time range to limit returned spikes to, by default None + """ if time_slice is None: time_slice = slice(-np.inf, np.inf) @@ -395,8 +473,23 @@ def spike_times_sorted_by_place_field_peak(self, time_slice=None): ] for neuron_ind in neuron_sort_ind ] + return new_spike_times def get_ahead_behind_distance(self, track_graph=None, time_slice=None): + """Get the ahead-behind distance of the decoded position from the animal's actual position + + Parameters + ---------- + track_graph : TrackGraph, optional + environment track graph to project position on, by default None + time_slice : Slice, optional + time intrerval to restrict to, by default None + + Returns + ------- + distance_metrics : np.ndarray + Information about the distance of the animal to the mental position. + """ # TODO: store in table if time_slice is None: diff --git a/src/spyglass/spikesorting/analysis/v1/group.py b/src/spyglass/spikesorting/analysis/v1/group.py index 77a3fad41..ab824bebe 100644 --- a/src/spyglass/spikesorting/analysis/v1/group.py +++ b/src/spyglass/spikesorting/analysis/v1/group.py @@ -85,6 +85,13 @@ def filter_units( ) -> np.ndarray: """ Filter units based on labels + + labels: list of list of strings + list of labels for each unit + include_labels: list of strings + if provided, only units with any of these labels will be included + exclude_labels: list of strings + if provided, units with any of these labels will be excluded """ include_labels = np.unique(include_labels) exclude_labels = np.unique(exclude_labels) @@ -108,7 +115,23 @@ def filter_units( return include_mask @staticmethod - def fetch_spike_data(key, time_slice=None): + def fetch_spike_data( + key: dict, time_slice: list[float] = None + ) -> list[np.ndarray]: + """fetch spike times for units in the group + + Parameters + ---------- + key : dict + dictionary containing the group key + time_slice : list of float, optional + if provided, filter for spikes occurring in the interval [start, stop], by default None + + Returns + ------- + list of np.ndarray + list of spike times for each unit in the group + """ # get merge_ids for SpikeSortingOutput merge_ids = ( ( @@ -170,6 +193,20 @@ def fetch_spike_data(key, time_slice=None): @classmethod def get_spike_indicator(cls, key: dict, time: np.ndarray) -> np.ndarray: + """get spike indicator matrix for the group + + Parameters + ---------- + key : dict + key to identify the group + time : np.ndarray + time vector for which to calculate the spike indicator matrix + + Returns + ------- + np.ndarray + spike indicator matrix with shape (len(time), n_units) + """ time = np.asarray(time) min_time, max_time = time[[0, -1]] spike_times = cls.fetch_spike_data(key) @@ -195,6 +232,24 @@ def get_firing_rate( multiunit: bool = False, smoothing_sigma: float = 0.015, ) -> np.ndarray: + """get time-dependent firing rate for units in the group + + Parameters + ---------- + key : dict + key to identify the group + time : np.ndarray + time vector for which to calculate the firing rate + multiunit : bool, optional + if True, return the multiunit firing rate for units in the group, by default False + smoothing_sigma : float, optional + standard deviation of gaussian filter to smooth firing rates 
in seconds, by default 0.015 + + Returns + ------- + np.ndarray + time-dependent firing rate with shape (len(time), n_units) + """ spike_indicator = cls.get_spike_indicator(key, time) if spike_indicator.ndim == 1: spike_indicator = spike_indicator[:, np.newaxis] From 7edff6aa5cbfb05343471a761f41177f24268ae7 Mon Sep 17 00:00:00 2001 From: Chris Broz Date: Tue, 4 Jun 2024 17:30:13 -0500 Subject: [PATCH 47/60] Add Common Errors doc (#997) * Add Common Errors * Update changelog --- .github/pull_request_template.md | 2 +- CHANGELOG.md | 4 +- docs/mkdocs.yml | 9 +-- docs/src/misc/common_errs.md | 111 +++++++++++++++++++++++++++++++ docs/src/misc/index.md | 3 +- 5 files changed, 122 insertions(+), 7 deletions(-) create mode 100644 docs/src/misc/common_errs.md diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index aebccd6de..c94ea1c55 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -33,6 +33,6 @@ Table.alter() # Comment regarding the change - [ ] If release, I have updated the `CITATION.cff` - [ ] This PR makes edits to table definitions: (yes/no) - [ ] If table edits, I have included an `alter` snippet for release notes. -- [ ] If this PR makes changes to positon, I ran the relevant tests locally. +- [ ] If this PR makes changes to position, I ran the relevant tests locally. - [ ] I have updated the `CHANGELOG.md` with PR number and description. - [ ] I have added/edited docs/notebooks to reflect the changes diff --git a/CHANGELOG.md b/CHANGELOG.md index 3e8d9a448..856dc930d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ - Clean up old `TableChain.join` call in mixin delete. #982 - Add pytests for position pipeline, various `test_mode` exceptions #966 - Migrate `pip` dependencies from `environment.yml`s to `pyproject.toml` #966 +- Add documentation for common error messages #997 ### Pipelines @@ -29,7 +30,8 @@ - Common - Don't insert lab member when creating lab team #983 - Spikesorting - - Allow user to set smoothing timescale in `SortedSpikesGroup.get_firing_rate` #994 + - Allow user to set smoothing timescale in `SortedSpikesGroup.get_firing_rate` + #994 - Update docstrings #996 ## [0.5.2] (April 22, 2024) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index acec4f829..c6394657e 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -71,13 +71,14 @@ nav: - MUA Detection: notebooks/50_MUA_Detection.ipynb - Miscellaneous: - Overview: misc/index.md + - Common Errors: misc/common_errs.md + - Database Management: misc/database_management.md + - Export: misc/export.md - FigURL: misc/figurl_views.md - - Session Groups: misc/session_groups.md - Insert Data: misc/insert_data.md - - Mixin: misc/mixin.md - Merge Tables: misc/merge_tables.md - - Database Management: misc/database_management.md - - Export: misc/export.md + - Mixin: misc/mixin.md + - Session Groups: misc/session_groups.md - API Reference: api/ # defer to gen-files + literate-nav - How to Contribute: contribute.md - Change Log: CHANGELOG.md diff --git a/docs/src/misc/common_errs.md b/docs/src/misc/common_errs.md new file mode 100644 index 000000000..34143b0f5 --- /dev/null +++ b/docs/src/misc/common_errs.md @@ -0,0 +1,111 @@ +# Common Errors + +## Debug Mode + +To enter into debug mode, you can add the following line to your code ... + +```python +__import__("pdb").set_trace() +``` + +This will set a breakpoint in your code at that line. When you run your code, it +will pause at that line and you can explore the variables in the current frame. 
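+
+For example, a minimal sketch (the function and key below are made up for
+illustration) that pauses inside a failing call might look like ...
+
+```python
+def compute(key):
+    __import__("pdb").set_trace()  # execution pauses here; inspect `key`
+    return key["value"] * 2
+
+
+compute({"value": 2})  # at the (Pdb) prompt, try `p key`, `l`, then `c`
+```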
+Commands in this mode include ... + +- `u` and `d` to move up and down the stack +- `l` to list the code around the current line +- `q` to quit the debugger +- `c` to continue running the code +- `h` for help, which will list all the commands + +`ipython` and jupyter notebooks can launch a debugger automatically at the last +error by running `%debug`. + +## Integrity + +```console +IntegrityError: Cannot add or update a child row: a foreign key constraint fails (`schema`.`_table`, CONSTRAINT `_table_ibfk_1` FOREIGN KEY (`parent_field`) REFERENCES `other_schema`.`parent_name` (`parent_field`) ON DELETE RESTRICT ON UPDATE CASCADE) +``` + +`IntegrityError` during `insert` means that some part of the key you're +inserting doesn't exist in the parent of the table you're inserting into. You +can explore which that may be by doing the following... + +```python +my_key = dict(value=key) # whatever you're inserting +MyTable.insert1(my_key) # error here +parents = MyTable.parents(as_objects=True) # get the parents as FreeTables +for parent in parents: # iterate through the parents, with only relevant fields + parent_key = {k: v for k, v in my_key.items() if k in parent.heading.names} + print(parent & parent_key) # restricted parent +``` + +If any of the printed tables are empty, you know you need to insert into that +table (or another ancestor up the pipeline) first. This code will not work if +there are aliases in the table (i.e., `proj` in the definition). In that case, +you'll need to modify your `parent_key` to reflect the renaming. + +The error message itself will tell you which table is the limiting parent. After +`REFERENCES` in the error message, you'll see the parent table and the column +that is causing the error. + +## Permission + +```console +('Insufficient privileges.', "INSERT command denied to user 'username'@'127.0.0.1' for table '_table_name'", 'INSERT INTO `schema_name`.`table_name`(`field1`,`field2`) VALUES (%s,%s)') +``` + +This is a MySQL error that means that either ... + +- You don't have access to the command you're trying to run (e.g., `INSERT`) +- You don't have access to this command on the schema you're trying to run it on + +To see what permissions you have, you can run the following ... + +```python +dj.conn().query("SHOW GRANTS FOR CURRENT_USER();").fetchall() +``` + +If you think you should have access to the command, you contact your database +administrator (e.g., Chris in the Frank Lab). Please share the output of the +above command with them. + +## Type + +```console +TypeError: example_function() got an unexpected keyword argument 'this_arg' +``` + +This means that you're calling a function with an argument that it doesn't +expect (e.g., `example_function(this_arg=5)`). You can check the function's +accepted arguments by running `help(example_function)`. + +```console +TypeError: 'NoneType' object is not iterable +``` + +This means that some function is trying to do something with an object of an +unexpected type. For example, if might by running `for item in variable: ...` +when `variable` is `None`. You can check the type of the variable by going into +debug mode and running `type(variable)`. + +## KeyError + +```console +KeyError: 'field_name' +``` + +This means that you're trying to access a key in a dictionary that doesn't +exist. You can check the keys of the dictionary by running `variable.keys()`. If +this is in your custom code, you can get a key and supply a default value if it +doesn't exist by running `variable.get('field_name', default_value)`. 
+ +## DataJoint + +```console +DataJointError("Attempt to delete part table {part} before deleting from its master {master} first.") +``` + +This means that DataJoint's delete process found a part table with a foreign key +reference to the data you're trying to delete. You need to find the master table +listed and delete from that table first. diff --git a/docs/src/misc/index.md b/docs/src/misc/index.md index b9971a81c..51ef0007d 100644 --- a/docs/src/misc/index.md +++ b/docs/src/misc/index.md @@ -2,10 +2,11 @@ This folder contains miscellaneous supporting files documentation. +- [Common Errors](./common_errs.md) - [Database Management](./database_management.md) - [Export](./export.md) -- [figurl Views](./figurl_views.md) - [Insert Data](./insert_data.md) - [Merge Tables](./merge_tables.md) - [Mixin Class](./mixin.md) - [Session Groups](./session_groups.md) +- [figurl Views](./figurl_views.md) From d8e519698b421670fc812fcefa575a0b0aa1f934 Mon Sep 17 00:00:00 2001 From: Denisse Morales-Rodriguez <68555303+denissemorales@users.noreply.github.com> Date: Wed, 5 Jun 2024 13:43:16 -0700 Subject: [PATCH 48/60] Mua notebook (#998) * documented some of mua notebook * mua notebook documented * documented some of mua notebook * synced py script --- notebooks/50_MUA_Detection.ipynb | 495 ++++++++++++----------- notebooks/py_scripts/50_MUA_Detection.py | 66 ++- 2 files changed, 319 insertions(+), 242 deletions(-) diff --git a/notebooks/50_MUA_Detection.ipynb b/notebooks/50_MUA_Detection.ipynb index 5d7f9a061..2c8fa8511 100644 --- a/notebooks/50_MUA_Detection.ipynb +++ b/notebooks/50_MUA_Detection.ipynb @@ -1,5 +1,49 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MUA Detection" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Overview" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "_Developer Note:_ if you may make a PR in the future, be sure to copy this\n", + "notebook, and use the `gitignore` prefix `temp` to avoid future conflicts.\n", + "\n", + "This is one notebook in a multi-part series on Spyglass.\n", + "\n", + "- To set up your Spyglass environment and database, see\n", + " [the Setup notebook](./00_Setup.ipynb).\n", + "- For additional info on DataJoint syntax, including table definitions and\n", + " inserts, see\n", + " [the Insert Data notebook](./01_Insert_Data.ipynb).\n", + "- Prior to running, please generate sorted spikes with the [spike sorting\n", + " pipeline](./02_Spike_Sorting.ipynb) and generate input position data with\n", + " either the [Trodes](./20_Position_Trodes.ipynb) or DLC notebooks\n", + " ([1](./21_Position_DLC_1.ipynb), [2](./22_Position_DLC_2.ipynb),\n", + " [3](./23_Position_DLC_3.ipynb)).\n", + "\n", + "The goal of this notebook is to populate the `MuaEventsV1` table, which depends `SortedSpikesGroup` and `PositionOutput`." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Imports" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -9,8 +53,9 @@ "name": "stderr", "output_type": "stream", "text": [ - "[2024-02-07 14:44:16,324][INFO]: Connecting edeno@lmf-db.cin.ucsf.edu:3306\n", - "[2024-02-07 14:44:16,357][INFO]: Connected edeno@lmf-db.cin.ucsf.edu:3306\n" + "[2024-06-04 16:03:33,573][INFO]: Connecting denissemorales@lmf-db.cin.ucsf.edu:3306\n", + "[2024-06-04 16:03:33,619][INFO]: Connected denissemorales@lmf-db.cin.ucsf.edu:3306\n", + "OMP: Info #277: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.\n" ] } ], @@ -25,6 +70,13 @@ "from spyglass.mua.v1.mua import MuaEventsV1, MuaEventsParameters" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Select Position Data" + ] + }, { "cell_type": "code", "execution_count": 2, @@ -32,79 +84,8 @@ "outputs": [ { "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
[garbled HTML table output omitted from this removed notebook cell: DataJoint repr of MuaEventsParameters with columns mua_param_name ("a name for this set of parameters") and mua_param_dict ("dictionary of parameters"), one row (default, =BLOB=), Total: 1; the equivalent text/plain repr follows]
\n", - " " - ], "text/plain": [ - "*mua_param_nam mua_param_\n", - "+------------+ +--------+\n", - "default =BLOB= \n", - " (Total: 1)" + "UUID('4eb59a18-045a-5768-d12e-b6473415ae1c')" ] }, "execution_count": 2, @@ -113,7 +94,27 @@ } ], "source": [ - "MuaEventsParameters()" + "from spyglass.position import PositionOutput\n", + "\n", + "#First, select the file of interest\n", + "nwb_copy_file_name = \"mediumnwb20230802_.nwb\"\n", + "\n", + "#Then, get position data\n", + "trodes_s_key = {\n", + " \"nwb_file_name\": nwb_copy_file_name,\n", + " \"interval_list_name\": \"pos 0 valid times\",\n", + " \"trodes_pos_params_name\": \"single_led_upsampled\",\n", + "}\n", + "\n", + "pos_merge_id = (PositionOutput.TrodesPosV1 & trodes_s_key).fetch1(\"merge_id\")\n", + "pos_merge_id" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Select Sorted Spikes Data" ] }, { @@ -127,21 +128,23 @@ "\n", " \n", " \n", + " \n", + " \n", + "
[garbled HTML table output omitted from this added notebook cell: DataJoint repr of DandiPath with export_id 14, file_id 0, dandiset_id 214304, filename minirec20230622_4W5BCN5Q1O.nwb, dandi_path sub-54321/sub-54321_ecephys.nwb, dandi_instance dandi-staging, Total: 1; the equivalent text/plain repr follows]
\n", + " " + ], + "text/plain": [ + "*export_id *file_id dandiset_id filename dandi_path dandi_instance\n", + "+-----------+ +---------+ +------------+ +------------+ +------------+ +------------+\n", + "14 0 214304 minirec2023062 sub-54321/sub- dandi-staging \n", + " (Total: 1)" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "DandiPath() & {\"export_id\": 14}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When fetching data with spyglass, if a file is not available locally, syglass will automatically use \n", + "this information to stream the file from Dandi's server if available, providing an additional method\n", + " for sharing data with collaborators post-publication." + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/notebooks/py_scripts/05_Export.py b/notebooks/py_scripts/05_Export.py index b180832d4..2b8d588d3 100644 --- a/notebooks/py_scripts/05_Export.py +++ b/notebooks/py_scripts/05_Export.py @@ -5,7 +5,7 @@ # extension: .py # format_name: light # format_version: '1.5' -# jupytext_version: 1.16.0 +# jupytext_version: 1.15.2 # kernelspec: # display_name: spy # language: python @@ -193,6 +193,66 @@ # can use to replicate the database entries you used in your analysis. # +# # Dandiset Upload + +# One benefit of the `Export` table is it provides a list of all raw data, intermediate analysis files, +# and final analysis files needed to generate a set of figures in a work. To aid in data-sharing standards, +# we have implemented tools to compile and upload this set of files as a Dandi dataset, which can then be used +# by spyglass to directly read the data from the Dandi database if not available locally. +# +# We will walk through the steps to do so here: + +#
+# Dandi data compliance (admins) +# +# >__WARNING__: The following describes spyglass utilities that require database admin privileges to run. It involves altering database values to correct for metadata format errors generated prior to spyglass insert. As such it has the potential to violate data integrity and should be used with caution. +# > +# >The Dandi database has specific formatting standards for metadata and nwb files. If there were violations of this standard in the +# raw nwbfile, spyglass will propagate them into all generated analysis files. In this case, running the code below will result in a list of error printouts and an error raised within the `validate_dandiset` function. +# > +# >To aid in correcting common formatting errors identified with changes in dandi standards, we have included the method +# ``` +# Export().prepare_files_for_export(paper_key) +# ``` +# >which will attempt to resolve these issues for a set of paper files. The code is __not__ guaranteed to address all errors found within the file, but can be used as a template for your specific errors +#
+# +# +# + +# The first step you will need to do is to [create a Dandi account](https://www.dandiarchive.org/handbook/16_account/). +# With this account you can then [register a new dandiset](https://dandiarchive.org/dandiset/create) by providing a name and basic metadata. +# Dandi's instructions for these steps are available [here](https://www.dandiarchive.org/handbook/13_upload/). +# +# The key information you will need from your registration is the `dandiset ID` and your account `api_key`, both of which are available from your registered account. +# +# Spyglass can then use this information to compile and upload the dandiset for your paper: + +# + +from spyglass.common.common_dandi import DandiPath + +dandiset_id = 214304 # use the value for you registered dandiset +dandi_api_key = ( + "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # key connected to your Dandi account +) + +DandiPath().compile_dandiset( + paper_key, + dandiset_id=dandiset_id, + dandi_api_key=dandi_api_key, + dandi_instance="dandi", +) # use dandi_instance="dandi-staging" to use dandi's dev server +# - + +# As well as uploading your dandiset, this function will populate the table `DandiPath` which will record the information needed to access a given analysis file from the Dandi server +# + +DandiPath() & {"export_id": 14} + +# When fetching data with spyglass, if a file is not available locally, syglass will automatically use +# this information to stream the file from Dandi's server if available, providing an additional method +# for sharing data with collaborators post-publication. + # ## Up Next # diff --git a/src/spyglass/common/common_dandi.py b/src/spyglass/common/common_dandi.py new file mode 100644 index 000000000..e3c2836e1 --- /dev/null +++ b/src/spyglass/common/common_dandi.py @@ -0,0 +1,264 @@ +import os +import shutil +from pathlib import Path + +import datajoint as dj +import fsspec +import h5py +import pynwb +from fsspec.implementations.cached import CachingFileSystem + +try: + import dandi.download + import dandi.organize + import dandi.upload + import dandi.validate + from dandi.consts import known_instances + from dandi.dandiapi import DandiAPIClient + from dandi.metadata.nwb import get_metadata + from dandi.organize import OrganizeInvalid + from dandi.validate_types import Severity + +except ImportError as e: + ( + dandi.download, + dandi.organize, + dandi.upload, + dandi.validate, + known_instances, + DandiAPIClient, + get_metadata, + OrganizeInvalid, + Severity, + ) = [None] * 9 + logger.warning(e) + + +from spyglass.common.common_usage import Export +from spyglass.settings import export_dir +from spyglass.utils import SpyglassMixin, logger + +schema = dj.schema("common_dandi") + + +@schema +class DandiPath(SpyglassMixin, dj.Manual): + definition = """ + -> Export.File + --- + dandiset_id: varchar(16) + filename: varchar(255) + dandi_path: varchar(255) + dandi_instance = "dandi": varchar(32) + """ + + def fetch_file_from_dandi(self, key: dict): + dandiset_id, dandi_path, dandi_instance = (self & key).fetch1( + "dandiset_id", "dandi_path", "dandi_instance" + ) + dandiset_id = str(dandiset_id) + # get the s3 url from Dandi + with DandiAPIClient( + dandi_instance=known_instances[dandi_instance], + ) as client: + asset = client.get_dandiset(dandiset_id).get_asset_by_path( + dandi_path + ) + s3_url = asset.get_content_url(follow_redirects=1, strip_query=True) + + # stream the file from s3 + # first, create a virtual filesystem based on the http protocol + fs = fsspec.filesystem("http") + + # create a cache to save 
downloaded data to disk (optional) + fsspec_file = CachingFileSystem( + fs=fs, + cache_storage=f"{export_dir}/nwb-cache", # Local folder for the cache + ) + + # Open and return the file + fs_file = fsspec_file.open(s3_url, "rb") + io = pynwb.NWBHDF5IO(file=h5py.File(fs_file)) + nwbfile = io.read() + return (io, nwbfile) + + def compile_dandiset( + self, + key: dict, + dandiset_id: str, + dandi_api_key: str = None, + dandi_instance: str = "dandi", + ): + """Compile a Dandiset from the export. + Parameters + ---------- + key : dict + ExportSelection key + dandiset_id : str + Dandiset ID generated by the user on the dadndi server + dandi_api_key : str, optional + API key for the dandi server. Optional if the environment variable + DANDI_API_KEY is set. + dandi_instance : What instance of Dandi the dandiset is on. Defaults to the dev server + """ + key = (Export & key).fetch1("KEY") + paper_id = (Export & key).fetch1("paper_id") + if self & key: + raise ValueError( + "Adding new files to an existing dandiset is not permitted. " + + f"Please rerun after deleting existing entries for {key}" + ) + + # make a temp dir with symbolic links to the export files + source_files = (Export.File() & key).fetch("file_path") + paper_dir = f"{export_dir}/{paper_id}" + os.makedirs(paper_dir, exist_ok=True) + destination_dir = f"{paper_dir}/dandiset_{paper_id}" + dandiset_dir = f"{paper_dir}/{dandiset_id}" + + # check if pre-existing directories for dandi export exist. Remove if so to continue + for dandi_dir in destination_dir, dandiset_dir: + if os.path.exists(dandi_dir): + from datajoint.utils import user_choice + + if ( + user_choice( + "Pre-existing dandi export dir exist." + + f"Delete existing export folder: {dandi_dir}", + default="no", + ) + == "yes" + ): + shutil.rmtree(dandi_dir) + continue + raise RuntimeError( + f"Directory must be removed prior to dandi export to ensure dandi-compatability: {dandi_dir}" + ) + + os.makedirs(destination_dir, exist_ok=False) + for file in source_files: + if not os.path.exists( + f"{destination_dir}/{os.path.basename(file)}" + ): + os.symlink(file, f"{destination_dir}/{os.path.basename(file)}") + + # validate the dandiset + validate_dandiset(destination_dir, ignore_external_files=True) + + # given dandiset_id, download the dandiset to the export_dir + url = f"{known_instances[dandi_instance].gui}/dandiset/{dandiset_id}/draft" + dandi.download.download(url, output_dir=paper_dir) + + # organize the files in the dandiset directory + dandi.organize.organize( + destination_dir, dandiset_dir, invalid=OrganizeInvalid.WARN + ) + + # get the dandi name translations + translations = translate_name_to_dandi(destination_dir) + + # upload the dandiset to the dandi server + if dandi_api_key: + os.environ["DANDI_API_KEY"] = dandi_api_key + dandi.upload.upload( + [dandiset_dir], + dandi_instance=dandi_instance, + ) + logger.info(f"Dandiset {dandiset_id} uploaded") + # insert the translations into the dandi table + translations = [ + { + **( + Export.File() & key & f"file_path LIKE '%{t['filename']}'" + ).fetch1(), + **t, + "dandiset_id": dandiset_id, + "dandi_instance": dandi_instance, + } + for t in translations + ] + self.insert(translations, ignore_extra_fields=True) + + +def _get_metadata(path): + # taken from definition within dandi.organize.organize + try: + meta = get_metadata(path) + except Exception as exc: + meta = {} + raise RuntimeError("Failed to get metadata for %s: %s", path, exc) + meta["path"] = path + return meta + + +def translate_name_to_dandi(folder): + """Uses 
dandi.organize to translate filenames to dandi paths + + *Note* The name for a given file is dependent on that of all files in the folder + + Parameters + ---------- + folder : str + location of files to be translated + + Returns + ------- + dict + dictionary of filename to dandi_path translations + """ + files = Path(folder).glob("*") + metadata = list(map(_get_metadata, files)) + metadata, skip_invalid = dandi.organize.filter_invalid_metadata_rows( + metadata + ) + metadata = dandi.organize.create_unique_filenames_from_metadata( + metadata, required_fields=None + ) + return [ + {"filename": Path(file["path"]).name, "dandi_path": file["dandi_path"]} + for file in metadata + ] + + +def validate_dandiset( + folder, min_severity="ERROR", ignore_external_files=False +): + """Validate the dandiset directory + + Parameters + ---------- + folder : str + location of dandiset to be validated + min_severity : str + minimum severity level for errors to be reported, threshold for failed Dandi upload is "ERROR" + ignore_external_files : bool + whether to ignore external file errors. Used if validating + before the organize step + """ + validator_result = dandi.validate.validate(folder) + min_severity_value = Severity[min_severity].value + + filtered_results = [ + i + for i in validator_result + if i.severity is not None and i.severity.value >= min_severity_value + ] + + if ignore_external_files: + # ignore external file errors. will be resolved during organize step + filtered_results = [ + i + for i in filtered_results + if not i.message.startswith("Path is not inside") + ] + + if filtered_results: + raise ValueError( + "Validation failed\n\t" + + "\n\t".join( + [ + f"{result.severity}: {result.message} in {result.path}" + for result in filtered_results + ] + ) + ) diff --git a/src/spyglass/common/common_lab.py b/src/spyglass/common/common_lab.py index de2ad8079..bd8a90262 100644 --- a/src/spyglass/common/common_lab.py +++ b/src/spyglass/common/common_lab.py @@ -129,6 +129,20 @@ def get_djuser_name(cls, dj_user) -> str: return query[0] + def check_admin_privilege( + cls, + error_message: str = "User does not have database admin privileges", + ): + """Check if a user has admin privilege. + + Parameters + ---------- + error_message: str + The error message to display if the user is not an admin. 
+ """ + if dj.config["database.user"] not in cls.admin: + raise PermissionError(error_message) + @schema class LabTeam(SpyglassMixin, dj.Manual): diff --git a/src/spyglass/common/common_usage.py b/src/spyglass/common/common_usage.py index dae4f7842..9d408b5bc 100644 --- a/src/spyglass/common/common_usage.py +++ b/src/spyglass/common/common_usage.py @@ -12,12 +12,17 @@ import datajoint as dj from datajoint import FreeTable from datajoint import config as dj_config +from pynwb import NWBHDF5IO from spyglass.common.common_nwbfile import AnalysisNwbfile, Nwbfile from spyglass.settings import export_dir from spyglass.utils import SpyglassMixin, logger from spyglass.utils.dj_graph import RestrGraph -from spyglass.utils.dj_helper_fn import unique_dicts +from spyglass.utils.dj_helper_fn import ( + make_file_obj_id_unique, + unique_dicts, + update_analysis_for_dandi_standard, +) schema = dj.schema("common_usage") @@ -408,3 +413,33 @@ def write_export( ) # TODO: export conda env + + def prepare_files_for_export(self, key, **kwargs): + """Resolve common known errors to make a set of analysis + files dandi compliant + + Parameters + ---------- + key : dict + restriction for a single entry of the Export table + """ + key = (self & key).fetch1("KEY") + self._make_fileset_ids_unique(key) + file_list = (self.File() & key).fetch("file_path") + for file in file_list: + update_analysis_for_dandi_standard(file, **kwargs) + + def _make_fileset_ids_unique(self, key): + """Make the object_id of each nwb in a dataset unique""" + key = (self & key).fetch1("KEY") + file_list = (self.File() & key).fetch("file_path") + unique_object_ids = [] + for file_path in file_list: + with NWBHDF5IO(file_path, "r") as io: + nwb = io.read() + object_id = nwb.object_id + if object_id not in unique_object_ids: + unique_object_ids.append(object_id) + else: + new_id = make_file_obj_id_unique(file_path) + unique_object_ids.append(new_id) diff --git a/src/spyglass/sharing/sharing_kachery.py b/src/spyglass/sharing/sharing_kachery.py index 8844aad4b..aa3c22747 100644 --- a/src/spyglass/sharing/sharing_kachery.py +++ b/src/spyglass/sharing/sharing_kachery.py @@ -163,7 +163,9 @@ def make(self, key): KacheryZone.reset_zone() @staticmethod - def download_file(analysis_file_name: str) -> bool: + def download_file( + analysis_file_name: str, permit_fail: bool = False + ) -> bool: """Download the specified analysis file and associated linked files from kachery-cloud if possible @@ -211,10 +213,10 @@ def download_file(analysis_file_name: str) -> bool: raise Exception( f"Linked file {linked_file_path} cannot be downloaded" ) - if not downloaded: + if not downloaded and not permit_fail: raise Exception(f"{analysis_file_name} cannot be downloaded") - return True + return downloaded def share_data_to_kachery( diff --git a/src/spyglass/utils/dj_helper_fn.py b/src/spyglass/utils/dj_helper_fn.py index 89b1950cd..3fa18191c 100644 --- a/src/spyglass/utils/dj_helper_fn.py +++ b/src/spyglass/utils/dj_helper_fn.py @@ -2,14 +2,19 @@ import inspect import os +from pathlib import Path from typing import List, Type, Union +from uuid import uuid4 import datajoint as dj +import h5py import numpy as np from datajoint.user_tables import UserTable from spyglass.utils.logging import logger -from spyglass.utils.nwb_helper_fn import get_nwb_file +from spyglass.utils.nwb_helper_fn import file_from_dandi, get_nwb_file + +STR_DTYPE = h5py.special_dtype(vlen=str) # Tables that should be excluded from the undirected graph when finding paths # for TableChain objects and 
searching for an upstream key. @@ -229,6 +234,10 @@ def fetch_nwb(query_expression, nwb_master, *attrs, **kwargs): kwargs["as_dict"] = True # force return as dictionary tbl, attr_name = nwb_master + if "analysis" in attr_name: + file_name_attr = "analysis_file_name" + else: + file_name_attr = "nwb_file_name" if not attrs: attrs = query_expression.heading.names @@ -243,9 +252,18 @@ def fetch_nwb(query_expression, nwb_master, *attrs, **kwargs): # This also opens the file and stores the file object get_nwb_file(file_path) - rec_dicts = ( - query_expression * tbl.proj(nwb2load_filepath=attr_name) - ).fetch(*attrs, "nwb2load_filepath", **kwargs) + query_table = query_expression * tbl.proj(nwb2load_filepath=attr_name) + rec_dicts = query_table.fetch(*attrs, **kwargs) + # get filepath for each. Use datajoint for checksum if local + for rec_dict in rec_dicts: + file_path = file_path_fn(rec_dict[file_name_attr]) + if file_from_dandi(file_path): + # skip the filepath checksum if streamed from Dandi + rec_dict["nwb2load_filepath"] = file_path + continue + rec_dict["nwb2load_filepath"] = (query_table & rec_dict).fetch1( + "nwb2load_filepath" + ) if not rec_dicts or not np.any( ["object_id" in key for key in rec_dicts[0]] @@ -289,3 +307,161 @@ def get_child_tables(table): ) for s in table.children() ] + + +def update_analysis_for_dandi_standard( + filepath: str, + age: str = "P4M/P8M", +): + """Function to resolve common nwb file format errors within the database + + Parameters + ---------- + filepath : str + abs path to the file to edit + age : str, optional + age to assign animal if missing, by default "P4M/P8M" + """ + from spyglass.common import LabMember + + LabMember().check_admin_privilege( + error_message="Admin permissions required to edit existing analysis files" + ) + file_name = filepath.split("/")[-1] + # edit the file + with h5py.File(filepath, "a") as file: + sex_value = file["/general/subject/sex"][()].decode("utf-8") + if not sex_value in ["Female", "Male", "F", "M", "O", "U"]: + raise ValueError(f"Unexpected value for sex: {sex_value}") + + if len(sex_value) > 1: + new_sex_value = sex_value[0].upper() + logger.info( + f"Adjusting subject sex: '{sex_value}' -> '{new_sex_value}'" + ) + file["/general/subject/sex"][()] = new_sex_value + + # replace subject species value "Rat" with "Rattus norvegicus" + species_value = file["/general/subject/species"][()].decode("utf-8") + if species_value == "Rat": + new_species_value = "Rattus norvegicus" + print( + f"Adjusting subject species from '{species_value}' to '{new_species_value}'." + ) + file["/general/subject/species"][()] = new_species_value + + if not ( + len(species_value.split(" ")) == 2 or "NCBITaxon" in species_value + ): + raise ValueError( + f"Dandi upload requires species either be in Latin binomial form (e.g., 'Mus musculus' and 'Homo sapiens')" + + "or be a NCBI taxonomy link (e.g., 'http://purl.obolibrary.org/obo/NCBITaxon_280675')." + + f"\n Please update species value of: {species_value}" + ) + + # add subject age dataset "P4M/P8M" + if "age" not in file["/general/subject"]: + new_age_value = age + logger.info( + f"Adding missing subject age, set to '{new_age_value}'." 
+ ) + file["/general/subject"].create_dataset( + name="age", data=new_age_value, dtype=STR_DTYPE + ) + + # format name to "Last, First" + experimenter_value = file["/general/experimenter"][:].astype(str) + new_experimenter_value = dandi_format_names(experimenter_value) + if experimenter_value != new_experimenter_value: + new_experimenter_value = new_experimenter_value.astype(STR_DTYPE) + logger.info( + f"Adjusting experimenter from {experimenter_value} to {new_experimenter_value}." + ) + file["/general/experimenter"][:] = new_experimenter_value + + # update the datajoint external store table to reflect the changes + _resolve_external_table(filepath, file_name) + + +def dandi_format_names(experimenter: List) -> List: + """Make names compliant with dandi standard of "Last, First" + + Parameters + ---------- + experimenter : List + List of experimenter names + + Returns + ------- + List + reformatted list of experimenter names + """ + for i, name in enumerate(experimenter): + parts = name.split(" ") + new_name = " ".join( + parts[:-1], + ) + new_name = f"{parts[-1]}, {new_name}" + experimenter[i] = new_name + return experimenter + + +def _resolve_external_table( + filepath: str, file_name: str, location: str = "analysis" +): + """Function to resolve database vs. file property discrepancies. + + WARNING: This should only be used when editing file metadata. Can violate data + integrity if impproperly used. + + Parameters + ---------- + filepath : str + abs path to the file to edit + file_name : str + name of the file to edit + location : str, optional + which external table the file is in, current options are ["analysis", "raw], by default "analysis" + """ + from spyglass.common import LabMember + from spyglass.common.common_nwbfile import schema as common_schema + + LabMember().check_admin_privilege( + error_message="Please contact database admin to edit database checksums" + ) + external_table = ( + common_schema.external[location] & f"filepath LIKE '%{file_name}'" + ) + external_key = external_table.fetch1() + external_key.update( + { + "size": Path(filepath).stat().st_size, + "contents_hash": dj.hash.uuid_from_file(filepath), + } + ) + common_schema.external[location].update1(external_key) + + +def make_file_obj_id_unique(nwb_path: str): + """Make the top-level object_id attribute of the file unique + + Parameters + ---------- + nwb_path : str + path to the NWB file + + Returns + ------- + str + the new object_id + """ + from spyglass.common.common_lab import LabMember # noqa: F401 + + LabMember().check_admin_privilege( + error_message="Admin permissions required to edit existing analysis files" + ) + new_id = str(uuid4()) + with h5py.File(nwb_path, "a") as f: + f.attrs["object_id"] = new_id + _resolve_external_table(nwb_path, nwb_path.split("/")[-1]) + return new_id diff --git a/src/spyglass/utils/nwb_helper_fn.py b/src/spyglass/utils/nwb_helper_fn.py index de7671b42..d5b6e4624 100644 --- a/src/spyglass/utils/nwb_helper_fn.py +++ b/src/spyglass/utils/nwb_helper_fn.py @@ -59,9 +59,27 @@ def get_nwb_file(nwb_file_path): # the download functions assume just the filename, so we need to # get that from the path if not AnalysisNwbfileKachery.download_file( - os.path.basename(nwb_file_path) + os.path.basename(nwb_file_path), permit_fail=True ): - return None + logger.info( + "NWB file not found in kachery; checking Dandi for " + + f"{nwb_file_path}" + ) + # Dandi fallback SB 2024-04-03 + from ..common.common_dandi import DandiPath + + dandi_key = {"filename": os.path.basename(nwb_file_path)} + if 
not DandiPath & dandi_key: + # If not in Dandi, then we can't find the file + raise FileNotFoundError( + f"NWB file not found in kachery or Dandi: {os.path.basename(nwb_file_path)}." + ) + io, nwbfile = DandiPath().fetch_file_from_dandi( + dandi_key + ) # TODO: consider case where file in multiple dandisets + __open_nwb_files[nwb_file_path] = (io, nwbfile) + return nwbfile + # now open the file io = pynwb.NWBHDF5IO( path=nwb_file_path, mode="r", load_namespaces=True @@ -72,6 +90,17 @@ def get_nwb_file(nwb_file_path): return nwbfile +def file_from_dandi(filepath): + """helper to determine if open file is streamed from Dandi""" + if filepath not in __open_nwb_files: + return False + build_keys = __open_nwb_files[filepath][0]._HDF5IO__built.keys() + for k in build_keys: + if "HTTPFileSystem" in k: + return True + return False + + def get_config(nwb_file_path): """Return a dictionary of config settings for the given NWB file. If the file does not exist, return an empty dict. From 2de1d2b00e374f1027c5e2725db7de31ac3e0303 Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Thu, 6 Jun 2024 13:05:26 -0700 Subject: [PATCH 50/60] Minor fixes (#999) * give analysis nwb new uuid when created * fix function argument * update changelog --- CHANGELOG.md | 1 + notebooks/01_Insert_Data.ipynb | 2 +- notebooks/py_scripts/01_Insert_Data.py | 4 ++-- src/spyglass/common/common_nwbfile.py | 5 +++++ 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a35f207c0..b898826f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ - Common - `PositionVideo` table now inserts into self after `make` #966 + - Files created by `AnalysisNwbfile.create()` receive new object_id #999 - Decoding: Default values for classes on `ImportError` #966 - DLC - Allow dlc without pre-existing tracking data #973, #975 diff --git a/notebooks/01_Insert_Data.ipynb b/notebooks/01_Insert_Data.ipynb index 2a2297642..23c208cdf 100644 --- a/notebooks/01_Insert_Data.ipynb +++ b/notebooks/01_Insert_Data.ipynb @@ -1160,7 +1160,7 @@ } ], "source": [ - "sgi.insert_sessions(nwb_file_name, rollback_on_fail=False, raise_error=False)" + "sgi.insert_sessions(nwb_file_name, rollback_on_fail=False, raise_err=False)" ] }, { diff --git a/notebooks/py_scripts/01_Insert_Data.py b/notebooks/py_scripts/01_Insert_Data.py index 870c6907a..48ddae39b 100644 --- a/notebooks/py_scripts/01_Insert_Data.py +++ b/notebooks/py_scripts/01_Insert_Data.py @@ -5,7 +5,7 @@ # extension: .py # format_name: light # format_version: '1.5' -# jupytext_version: 1.16.0 +# jupytext_version: 1.15.2 # kernelspec: # display_name: spy # language: python @@ -216,7 +216,7 @@ # will still leave entries from parent tables. 
# -sgi.insert_sessions(nwb_file_name, rollback_on_fail=False, raise_error=False) +sgi.insert_sessions(nwb_file_name, rollback_on_fail=False, raise_err=False) # ## Inspecting the data # diff --git a/src/spyglass/common/common_nwbfile.py b/src/spyglass/common/common_nwbfile.py index d5bba9e51..82070d5fb 100644 --- a/src/spyglass/common/common_nwbfile.py +++ b/src/spyglass/common/common_nwbfile.py @@ -4,6 +4,7 @@ import string from pathlib import Path from time import time +from uuid import uuid4 import datajoint as dj import h5py @@ -223,6 +224,10 @@ def create(self, nwb_file_name): if alter_source_script: self._alter_spyglass_version(analysis_file_abs_path) + # create a new object id for the file + with h5py.File(nwb_file_abspath, "a") as f: + f.attrs["object_id"] = str(uuid4()) + # change the permissions to only allow owner to write permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH os.chmod(analysis_file_abs_path, permissions) From 4a1b40e728f0e546524f62a5e399dbc72b590292 Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Mon, 10 Jun 2024 12:57:57 -0700 Subject: [PATCH 51/60] Fix bug in change in analysis_file object_id (#1004) * fix bug in change in analysis_file_object_id * update changelog --- CHANGELOG.md | 2 +- src/spyglass/common/common_nwbfile.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b898826f2..36c3523f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,7 +24,7 @@ - Common - `PositionVideo` table now inserts into self after `make` #966 - - Files created by `AnalysisNwbfile.create()` receive new object_id #999 + - Files created by `AnalysisNwbfile.create()` receive new object_id #999, #1004 - Decoding: Default values for classes on `ImportError` #966 - DLC - Allow dlc without pre-existing tracking data #973, #975 diff --git a/src/spyglass/common/common_nwbfile.py b/src/spyglass/common/common_nwbfile.py index 82070d5fb..bcdc50c28 100644 --- a/src/spyglass/common/common_nwbfile.py +++ b/src/spyglass/common/common_nwbfile.py @@ -225,7 +225,7 @@ def create(self, nwb_file_name): self._alter_spyglass_version(analysis_file_abs_path) # create a new object id for the file - with h5py.File(nwb_file_abspath, "a") as f: + with h5py.File(analysis_file_abs_path, "a") as f: f.attrs["object_id"] = str(uuid4()) # change the permissions to only allow owner to write From 5d957f1cc3699fe21d63c3ce2f046a4b237ab71e Mon Sep 17 00:00:00 2001 From: Chris Broz Date: Tue, 11 Jun 2024 12:50:44 -0500 Subject: [PATCH 52/60] Remove classes for usused tables (#1003) * #976 * Remove notebook reference --- CHANGELOG.md | 12 +- notebooks/21_DLC.ipynb | 49 ----- notebooks/50_MUA_Detection.ipynb | 12 +- notebooks/py_scripts/21_DLC.py | 23 --- src/spyglass/common/common_ephys.py | 10 - src/spyglass/position/position_merge.py | 177 ------------------ src/spyglass/spikesorting/v0/__init__.py | 1 - .../spikesorting/v0/spikesorting_curation.py | 95 ---------- 8 files changed, 15 insertions(+), 364 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36c3523f6..15cf86478 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,18 +24,22 @@ - Common - `PositionVideo` table now inserts into self after `make` #966 - - Files created by `AnalysisNwbfile.create()` receive new object_id #999, #1004 + - Don't insert lab member when creating lab team #983 + - Files created by `AnalysisNwbfile.create()` receive new object_id #999 + - Remove unused `ElectrodeBrainRegion` table #1003 + - Files created by `AnalysisNwbfile.create()` receive new 
object_id #999, + #1004 - Decoding: Default values for classes on `ImportError` #966 -- DLC +- Position - Allow dlc without pre-existing tracking data #973, #975 - Raise `KeyError` for missing input parameters across helper funcs #966 - `DLCPosVideo` table now inserts into self after `make` #966 -- Common - - Don't insert lab member when creating lab team #983 + - Remove unused `PositionVideoSelection` and `PositionVideo` tables #1003 - Spikesorting - Allow user to set smoothing timescale in `SortedSpikesGroup.get_firing_rate` #994 - Update docstrings #996 + - Remove unused `UnitInclusionParameters` table from `spikesorting.v0` #1003 ## [0.5.2] (April 22, 2024) diff --git a/notebooks/21_DLC.ipynb b/notebooks/21_DLC.ipynb index ffc0d450c..b976ae5eb 100644 --- a/notebooks/21_DLC.ipynb +++ b/notebooks/21_DLC.ipynb @@ -2097,55 +2097,6 @@ "(PositionOutput.DLCPosV1() & dlc_key).fetch1_dataframe()" ] }, - { - "cell_type": "markdown", - "id": "e48c7a4e-0bbc-4101-baf2-e84f1f5739d5", - "metadata": {}, - "source": [ - "#### [PositionVideo](#TableOfContents)\n" - ] - }, - { - "cell_type": "markdown", - "id": "388e6602-8e80-47fa-be78-4ae120d52e41", - "metadata": {}, - "source": [ - "We can use the `PositionVideo` table to create a video that overlays just the\n", - "centroid and orientation on the video. This table uses the parameter `plot` to\n", - "determine whether to plot the entry deriving from the DLC arm or from the Trodes\n", - "arm of the position pipeline. This parameter also accepts 'all', which will plot\n", - "both (if they exist) in order to compare results.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b2a782ce-0a14-4725-887f-ae6f341635f8", - "metadata": {}, - "outputs": [], - "source": [ - "sgp.PositionVideoSelection().insert1(\n", - " {\n", - " \"nwb_file_name\": \"J1620210604_.nwb\",\n", - " \"interval_list_name\": \"pos 13 valid times\",\n", - " \"trodes_position_id\": 0,\n", - " \"dlc_position_id\": 1,\n", - " \"plot\": \"DLC\",\n", - " \"output_dir\": \"/home/dgramling/Src/\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c32993e7-5b32-46f9-a2f9-9634aef785f2", - "metadata": {}, - "outputs": [], - "source": [ - "sgp.PositionVideo.populate({\"plot\": \"DLC\"})" - ] - }, { "cell_type": "markdown", "id": "be097052-3789-4d55-aca1-e44d426c39b4", diff --git a/notebooks/50_MUA_Detection.ipynb b/notebooks/50_MUA_Detection.ipynb index 2c8fa8511..1da12af6a 100644 --- a/notebooks/50_MUA_Detection.ipynb +++ b/notebooks/50_MUA_Detection.ipynb @@ -96,10 +96,10 @@ "source": [ "from spyglass.position import PositionOutput\n", "\n", - "#First, select the file of interest\n", + "# First, select the file of interest\n", "nwb_copy_file_name = \"mediumnwb20230802_.nwb\"\n", "\n", - "#Then, get position data\n", + "# Then, get position data\n", "trodes_s_key = {\n", " \"nwb_file_name\": nwb_copy_file_name,\n", " \"interval_list_name\": \"pos 0 valid times\",\n", @@ -215,7 +215,7 @@ " SortedSpikesGroup,\n", ")\n", "\n", - "#Select sorted spikes data\n", + "# Select sorted spikes data\n", "sorted_spikes_group_key = {\n", " \"nwb_file_name\": nwb_copy_file_name,\n", " \"sorted_spikes_group_name\": \"test_group\",\n", @@ -880,10 +880,12 @@ "import numpy as np\n", "\n", "fig, axes = plt.subplots(2, 1, sharex=True, figsize=(15, 4))\n", - "speed = MuaEventsV1.get_speed(mua_key) #get speed from MuaEventsV1 table\n", + "speed = MuaEventsV1.get_speed(mua_key) # get speed from MuaEventsV1 table\n", "time = speed.index.to_numpy()\n", "speed = 
speed.to_numpy()\n", - "multiunit_firing_rate = MuaEventsV1.get_firing_rate(mua_key, time) #get firing rate from MuaEventsV1 table\n", + "multiunit_firing_rate = MuaEventsV1.get_firing_rate(\n", + " mua_key, time\n", + ") # get firing rate from MuaEventsV1 table\n", "\n", "time_slice = slice(\n", " np.searchsorted(time, mua_times.loc[10].start_time) - 1_000,\n", diff --git a/notebooks/py_scripts/21_DLC.py b/notebooks/py_scripts/21_DLC.py index 63ffe4d0c..8a55441e8 100644 --- a/notebooks/py_scripts/21_DLC.py +++ b/notebooks/py_scripts/21_DLC.py @@ -772,29 +772,6 @@ (PositionOutput.DLCPosV1() & dlc_key).fetch1_dataframe() -# #### [PositionVideo](#TableOfContents) -# - -# We can use the `PositionVideo` table to create a video that overlays just the -# centroid and orientation on the video. This table uses the parameter `plot` to -# determine whether to plot the entry deriving from the DLC arm or from the Trodes -# arm of the position pipeline. This parameter also accepts 'all', which will plot -# both (if they exist) in order to compare results. -# - -sgp.PositionVideoSelection().insert1( - { - "nwb_file_name": "J1620210604_.nwb", - "interval_list_name": "pos 13 valid times", - "trodes_position_id": 0, - "dlc_position_id": 1, - "plot": "DLC", - "output_dir": "/home/dgramling/Src/", - } -) - -sgp.PositionVideo.populate({"plot": "DLC"}) - # ### _CONGRATULATIONS!!_ # # Please treat yourself to a nice tea break :-) diff --git a/src/spyglass/common/common_ephys.py b/src/spyglass/common/common_ephys.py index f9abff647..7e394bd2d 100644 --- a/src/spyglass/common/common_ephys.py +++ b/src/spyglass/common/common_ephys.py @@ -907,13 +907,3 @@ def fetch1_dataframe(self, *attrs, **kwargs): filtered_nwb["filtered_data"].timestamps, name="time" ), ) - - -@schema -class ElectrodeBrainRegion(SpyglassMixin, dj.Manual): - definition = """ - # Table with brain region of electrodes determined post-experiment e.g. via histological analysis or CT - -> Electrode - --- - -> BrainRegion - """ diff --git a/src/spyglass/position/position_merge.py b/src/spyglass/position/position_merge.py index 72e330af6..ea2d574a2 100644 --- a/src/spyglass/position/position_merge.py +++ b/src/spyglass/position/position_merge.py @@ -1,7 +1,3 @@ -import functools as ft -import os -from pathlib import Path - import datajoint as dj import numpy as np import pandas as pd @@ -87,176 +83,3 @@ def fetch1_dataframe(self): & key ) return query.fetch1_dataframe() - - -@schema -class PositionVideoSelection(SpyglassMixin, dj.Manual): - definition = """ - nwb_file_name : varchar(255) # name of the NWB file - interval_list_name : varchar(170) # descriptive name of this interval list - plot_id : int - plot : varchar(40) # Which position info to overlay on video file - --- - output_dir : varchar(255) # directory where to save output video - """ - - # NOTE: See #630, #664. Excessive key length. 
- - def insert1(self, key, **kwargs): - key["plot_id"] = self.get_plotid(key) - super().insert1(key, **kwargs) - - def get_plotid(self, key): - fields = list(self.primary_key) - temp_key = {k: val for k, val in key.items() if k in fields} - plot_id = temp_key.get("plot_id", None) - if plot_id is None: - plot_id = ( - dj.U().aggr(self & temp_key, n="max(plot_id)").fetch1("n") or 0 - ) + 1 - else: - id = (self & temp_key).fetch("plot_id") - if len(id) > 0: - plot_id = max(id) + 1 - else: - plot_id = max(0, plot_id) - return plot_id - - -@schema -class PositionVideo(SpyglassMixin, dj.Computed): - """Creates a video of the computed head position and orientation as well as - the original LED positions overlaid on the video of the animal. - - Use for debugging the effect of position extraction parameters.""" - - definition = """ - -> PositionVideoSelection - --- - """ - - def make(self, key): - raise NotImplementedError("work in progress -DPG") - - plot = key.get("plot") - if plot not in ["DLC", "Trodes", "Common", "All"]: - raise ValueError(f"Plot {key['plot']} not supported") - # CBroz: I was told only tests should `assert`, code should `raise` - - M_TO_CM = 100 - output_dir = (PositionVideoSelection & key).fetch1("output_dir") - - logger.info("Loading position data...") - # raw_position_df = ( - # RawPosition() - # & { - # "nwb_file_name": key["nwb_file_name"], - # "interval_list_name": key["interval_list_name"], - # } - # ).fetch1_dataframe() - - query = { - "nwb_file_name": key["nwb_file_name"], - "interval_list_name": key["interval_list_name"], - } - merge_entries = { - "DLC": PositionOutput.DLCPosV1 & query, - "Trodes": PositionOutput.TrodesPosV1 & query, - "Common": PositionOutput.CommonPos & query, - } - - position_mean_dict = {} - if plot == "All": - # Check which entries exist in PositionOutput - merge_dict = {} - for source, entries in merge_entries.items(): - if entries: - merge_dict[source] = entries.fetch1_dataframe().drop( - columns=["velocity_x", "velocity_y", "speed"] - ) - - pos_df = ft.reduce( - lambda left, right,: pd.merge( - left[1], - right[1], - left_index=True, - right_index=True, - suffixes=[f"_{left[0]}", f"_{right[0]}"], - ), - merge_dict.items(), - ) - position_mean_dict = { - source: { - "position": np.asarray( - pos_df[[f"position_x_{source}", f"position_y_{source}"]] - ), - "orientation": np.asarray( - pos_df[[f"orientation_{source}"]] - ), - } - for source in merge_dict.keys() - } - else: - if plot == "DLC": - # CBroz - why is this extra step needed for DLC? 
- pos_df_key = merge_entries[plot].fetch1(as_dict=True) - pos_df = (PositionOutput & pos_df_key).fetch1_dataframe() - elif plot in ["Trodes", "Common"]: - pos_df = merge_entries[plot].fetch1_dataframe() - - position_mean_dict[plot]["position"] = np.asarray( - pos_df[["position_x", "position_y"]] - ) - position_mean_dict[plot]["orientation"] = np.asarray( - pos_df[["orientation"]] - ) - - logger.info("Loading video data...") - epoch = int("".join(filter(str.isdigit, key["interval_list_name"]))) + 1 - - ( - video_path, - video_filename, - meters_per_pixel, - video_time, - ) = get_video_path( - {"nwb_file_name": key["nwb_file_name"], "epoch": epoch} - ) - video_dir = os.path.dirname(video_path) + "/" - video_frame_col_name = [ - col for col in pos_df.columns if "video_frame_ind" in col - ][0] - video_frame_inds = pos_df[video_frame_col_name].astype(int).to_numpy() - if plot in ["DLC", "All"]: - video_path = ( - DLCPoseEstimationSelection - & (PositionOutput.DLCPosV1 & key).fetch1("KEY") - ).fetch1("video_path") - else: - video_path = check_videofile( - video_dir, key["output_dir"], video_filename - )[0] - - nwb_base_filename = key["nwb_file_name"].replace(".nwb", "") - output_video_filename = Path( - f"{Path(output_dir).as_posix()}/{nwb_base_filename}{epoch:02d}_" - f"{key['plot']}_pos_overlay.mp4" - ).as_posix() - - # centroids = {'red': np.asarray(raw_position_df[['xloc', 'yloc']]), - # 'green': np.asarray(raw_position_df[['xloc2', 'yloc2']])} - - logger.info("Making video...") - - make_video( - video_path, - video_frame_inds, - position_mean_dict, - video_time, - np.asarray(pos_df.index), - processor="opencv", - output_video_filename=output_video_filename, - cm_to_pixels=meters_per_pixel * M_TO_CM, - disable_progressbar=False, - ) - self.insert1(key) diff --git a/src/spyglass/spikesorting/v0/__init__.py b/src/spyglass/spikesorting/v0/__init__.py index f15d25230..8b6035023 100644 --- a/src/spyglass/spikesorting/v0/__init__.py +++ b/src/spyglass/spikesorting/v0/__init__.py @@ -22,7 +22,6 @@ MetricParameters, MetricSelection, QualityMetrics, - UnitInclusionParameters, WaveformParameters, Waveforms, WaveformSelection, diff --git a/src/spyglass/spikesorting/v0/spikesorting_curation.py b/src/spyglass/spikesorting/v0/spikesorting_curation.py index acdebe352..78ed93bba 100644 --- a/src/spyglass/spikesorting/v0/spikesorting_curation.py +++ b/src/spyglass/spikesorting/v0/spikesorting_curation.py @@ -1077,98 +1077,3 @@ def get_sort_group_info(cls, key): * SortGroup.SortGroupElectrode() ) * BrainRegion() return sort_group_info - - -@schema -class UnitInclusionParameters(SpyglassMixin, dj.Manual): - definition = """ - unit_inclusion_param_name: varchar(80) # the name of the list of thresholds for unit inclusion - --- - inclusion_param_dict: blob # the dictionary of inclusion / exclusion parameters - """ - - def insert1(self, key, **kwargs): - # check to see that the dictionary fits the specifications - # The inclusion parameter dict has the following form: - # param_dict['metric_name'] = (operator, value) - # where operator is '<', '>', <=', '>=', or '==' and value is the comparison (float) value to be used () - # param_dict['exclude_labels'] = [list of labels to exclude] - pdict = key["inclusion_param_dict"] - metrics_list = CuratedSpikeSorting().metrics_fields() - - for k in pdict: - if k not in metrics_list and k != "exclude_labels": - raise Exception( - f"key {k} is not a valid element of the inclusion_param_dict" - ) - if k in metrics_list: - if pdict[k][0] not in _comparison_to_function: - raise 
Exception( - f"operator {pdict[k][0]} for metric {k} is not in the valid operators list: {_comparison_to_function.keys()}" - ) - if k == "exclude_labels": - for label in pdict[k]: - if label not in valid_labels: - raise Exception( - f"exclude label {label} is not in the valid_labels list: {valid_labels}" - ) - super().insert1(key, **kwargs) - - def get_included_units( - self, curated_sorting_key, unit_inclusion_param_name - ): - """Given a reference to a set of curated sorting units and the name of - a unit inclusion parameter list, returns unit key - - Parameters - ---------- - curated_sorting_key : dict - key to select a set of curated sorting - unit_inclusion_param_name : str - name of a unit inclusion parameter entry - - Returns - ------- - dict - key to select all of the included units - """ - curated_sortings = (CuratedSpikeSorting() & curated_sorting_key).fetch() - inc_param_dict = ( - UnitInclusionParameters - & {"unit_inclusion_param_name": unit_inclusion_param_name} - ).fetch1("inclusion_param_dict") - units = (CuratedSpikeSorting().Unit() & curated_sortings).fetch() - units_key = (CuratedSpikeSorting().Unit() & curated_sortings).fetch( - "KEY" - ) - # get the list of labels to exclude if there is one - if "exclude_labels" in inc_param_dict: - exclude_labels = inc_param_dict["exclude_labels"] - del inc_param_dict["exclude_labels"] - else: - exclude_labels = [] - - # create a list of the units to kepp. - keep = np.asarray([True] * len(units)) - for metric in inc_param_dict: - # for all units, go through each metric, compare it to the value - # specified, and update the list to be kept - keep = np.logical_and( - keep, - _comparison_to_function[inc_param_dict[metric][0]]( - units[metric], inc_param_dict[metric][1] - ), - ) - - # now exclude by label if it is specified - if len(exclude_labels): - for unit_ind in np.ravel(np.argwhere(keep)): - labels = units[unit_ind]["label"].split(",") - for label in labels: - if label in exclude_labels: - keep[unit_ind] = False - break - - # return units that passed all of the tests - # TODO: Make this more efficient - return {i: units_key[i] for i in np.ravel(np.argwhere(keep))} From 8eadc303dab0291c8bb69772d9db992e5c2d3bb7 Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Fri, 14 Jun 2024 14:06:55 -0700 Subject: [PATCH 53/60] Non-daemon parallel populate (#1001) * initial non daemon parallel commit * resolve namespace and pickling errors * fix linting * update changelog * implement review comments * add parallel_make flag to spikesorting recording tables * fix multiprocessing spawn error on mac * move propert --------- Co-authored-by: Samuel Bray --- CHANGELOG.md | 1 + src/spyglass/decoding/v1/waveform_features.py | 2 + .../spikesorting/v0/spikesorting_recording.py | 2 + src/spyglass/spikesorting/v1/recording.py | 2 + src/spyglass/utils/dj_helper_fn.py | 43 +++++++++++++++++++ src/spyglass/utils/dj_mixin.py | 40 ++++++++++++++++- 6 files changed, 89 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15cf86478..4e6c8640c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ - Add pytests for position pipeline, various `test_mode` exceptions #966 - Migrate `pip` dependencies from `environment.yml`s to `pyproject.toml` #966 - Add documentation for common error messages #997 +- Allow mixin tables with parallelization in `make` to run populate with `processes > 1` #1001 ### Pipelines diff --git a/src/spyglass/decoding/v1/waveform_features.py b/src/spyglass/decoding/v1/waveform_features.py index 536ed4864..67f7ab692 
100644 --- a/src/spyglass/decoding/v1/waveform_features.py +++ b/src/spyglass/decoding/v1/waveform_features.py @@ -102,6 +102,8 @@ class UnitWaveformFeatures(SpyglassMixin, dj.Computed): object_id: varchar(40) # the NWB object that stores the waveforms """ + _parallel_make = True + def make(self, key): AnalysisNwbfile()._creation_times["pre_create_time"] = time() # get the list of feature parameters diff --git a/src/spyglass/spikesorting/v0/spikesorting_recording.py b/src/spyglass/spikesorting/v0/spikesorting_recording.py index e2eda9b43..26016cf91 100644 --- a/src/spyglass/spikesorting/v0/spikesorting_recording.py +++ b/src/spyglass/spikesorting/v0/spikesorting_recording.py @@ -381,6 +381,8 @@ class SpikeSortingRecording(SpyglassMixin, dj.Computed): -> IntervalList.proj(sort_interval_list_name='interval_list_name') """ + _parallel_make = True + def make(self, key): sort_interval_valid_times = self._get_sort_interval_valid_times(key) recording = self._get_filtered_recording(key) diff --git a/src/spyglass/spikesorting/v1/recording.py b/src/spyglass/spikesorting/v1/recording.py index 43ccd5495..5fa069a28 100644 --- a/src/spyglass/spikesorting/v1/recording.py +++ b/src/spyglass/spikesorting/v1/recording.py @@ -216,6 +216,8 @@ class SpikeSortingRecordingSelection(SpyglassMixin, dj.Manual): -> LabTeam """ + _parallel_make = True + @classmethod def insert_selection(cls, key: dict): """Insert a row into SpikeSortingRecordingSelection with an diff --git a/src/spyglass/utils/dj_helper_fn.py b/src/spyglass/utils/dj_helper_fn.py index 3fa18191c..85ff1922a 100644 --- a/src/spyglass/utils/dj_helper_fn.py +++ b/src/spyglass/utils/dj_helper_fn.py @@ -1,6 +1,7 @@ """Helper functions for manipulating information from DataJoint fetch calls.""" import inspect +import multiprocessing.pool import os from pathlib import Path from typing import List, Type, Union @@ -465,3 +466,45 @@ def make_file_obj_id_unique(nwb_path: str): f.attrs["object_id"] = new_id _resolve_external_table(nwb_path, nwb_path.split("/")[-1]) return new_id + + +def populate_pass_function(value): + """Pass function for parallel populate. + + Note: To avoid pickling errors, the table must be passed by class, NOT by instance. + Note: This function must be defined in the global namespace. + + Parameters + ---------- + value : (table, key, kwargs) + Class of table to populate, key to populate, and kwargs for populate + """ + table, key, kwargs = value + return table.populate(key, **kwargs) + + +class NonDaemonPool(multiprocessing.pool.Pool): + """NonDaemonPool. Used to create a pool of non-daemonized processes, + which are required for parallel populate operations in DataJoint. 
+ """ + + # Explicitly set the start method to 'fork' + # Allows the pool to be used in MacOS, where the default start method is 'spawn' + multiprocessing.set_start_method("fork", force=True) + + def Process(self, *args, **kwds): + proc = super(NonDaemonPool, self).Process(*args, **kwds) + + class NonDaemonProcess(proc.__class__): + """Monkey-patch process to ensure it is never daemonized""" + + @property + def daemon(self): + return False + + @daemon.setter + def daemon(self, val): + pass + + proc.__class__ = NonDaemonProcess + return proc diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index 35e54ea7a..51f398436 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -1,3 +1,4 @@ +import multiprocessing.pool from atexit import register as exit_register from atexit import unregister as exit_unregister from collections import OrderedDict @@ -19,7 +20,12 @@ from pymysql.err import DataError from spyglass.utils.database_settings import SHARED_MODULES -from spyglass.utils.dj_helper_fn import fetch_nwb, get_nwb_table +from spyglass.utils.dj_helper_fn import ( # NonDaemonPool, + NonDaemonPool, + fetch_nwb, + get_nwb_table, + populate_pass_function, +) from spyglass.utils.dj_merge_tables import RESERVED_PRIMARY_KEY as MERGE_PK from spyglass.utils.dj_merge_tables import Merge, is_merge_table from spyglass.utils.logging import logger @@ -72,6 +78,7 @@ class SpyglassMixin: _member_pk = None # LabMember primary key. Mixin ambivalent table structure _banned_search_tables = set() # Tables to avoid in restrict_by + _parallel_make = False # Tables that use parallel processing in make def __init__(self, *args, **kwargs): """Initialize SpyglassMixin. @@ -655,6 +662,37 @@ def super_delete(self, warn=True, *args, **kwargs): self._log_delete(start=time(), super_delete=True) super().delete(*args, **kwargs) + # -------------------------- non-daemon populate -------------------------- + def populate(self, *restrictions, **kwargs): + """Populate table in parallel. 
+ + Supersedes datajoint.table.Table.populate for classes that + spawn processes in their make function + """ + + # Pass through to super if not parallel in the make function or only a single process + processes = kwargs.pop("processes", 1) + if processes == 1 or not self._parallel_make: + return super().populate(*restrictions, **kwargs) + + # If parallel in both make and populate, use non-daemon processes + # Get keys to populate + keys = (self._jobs_to_do(restrictions) - self.target).fetch( + "KEY", limit=kwargs.get("limit", None) + ) + # package the call list + call_list = [(type(self), key, kwargs) for key in keys] + + # Create a pool of non-daemon processes to populate a single entry each + pool = NonDaemonPool(processes=processes) + try: + pool.map(populate_pass_function, call_list) + except Exception as e: + raise e + finally: + pool.close() + pool.terminate() + # ------------------------------- Export Log ------------------------------- @cached_property From 97933e7a6d3dad383a72a8664a52aca2de626339 Mon Sep 17 00:00:00 2001 From: Kyu Hyun Lee Date: Tue, 18 Jun 2024 07:55:09 -0700 Subject: [PATCH 54/60] Give UUID to artifact interval (#993) * Give UUID to artifact interval * Add ability to set smoothing sigma in get_firing_rate (#994) * add option to set spike smoothing sigma * update changelog * Add docstrings to SortedSpikesGroup and Decoding methods (#996) * Add docstrings * update changelog * fix spelling --------- Co-authored-by: Samuel Bray * Add Common Errors doc (#997) * Add Common Errors * Update changelog * Mua notebook (#998) * documented some of mua notebook * mua notebook documented * documented some of mua notebook * synced py script * Dandi export and read (#956) * compile exported files, download dandiset, and organize * add function to translate files into dandi-compatible names * add table to store dandi name translation and steps to populate * add dandiset validation * add function to fetch nwb from dandi * add function to change obj_id of nwb_file * add dandi upload call and fix circular import * debug dandi file streaming * fix circular import * resolve dandi-streamed files with fetch_nwb * implement review comments * add admin tools to fix common dandi discrepancies * implement tool to cleanup common dandi errors * add dandi export to tutorial * fix linting * update changelog * fix spelling * style changes from review * reorganize function locations * fix circular import * make dandi dependency optional in imports * store dandi instance of data in DandiPath * resolve case of pre-existing dandi entries for export * cleanup bugs from refactor * update notebook * Apply suggestions from code review Co-authored-by: Chris Broz * add requested changes from review * make method check_admin_privilege in LabMember --------- Co-authored-by: Chris Broz * Minor fixes (#999) * give analysis nwb new uuid when created * fix function argument * update changelog * Fix bug in change in analysis_file object_id (#1004) * fix bug in change in analysis_file_object_id * update changelog * Remove classes for unused tables (#1003) * #976 * Remove notebook reference * Non-daemon parallel populate (#1001) * initial non daemon parallel commit * resolve namespace and pickling errors * fix linting * update changelog * implement review comments * add parallel_make flag to spikesorting recording tables * fix multiprocessing spawn error on mac * move propert --------- Co-authored-by: Samuel Bray * Update pipeline column for IntervalList --------- Co-authored-by: Samuel Bray Co-authored-by:
Samuel Bray Co-authored-by: Chris Broz Co-authored-by: Denisse Morales-Rodriguez <68555303+denissemorales@users.noreply.github.com> Co-authored-by: Samuel Bray --- src/spyglass/lfp/v1/lfp_artifact.py | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/src/spyglass/lfp/v1/lfp_artifact.py b/src/spyglass/lfp/v1/lfp_artifact.py index 1f47a0884..70b8f2f2b 100644 --- a/src/spyglass/lfp/v1/lfp_artifact.py +++ b/src/spyglass/lfp/v1/lfp_artifact.py @@ -1,3 +1,5 @@ +import uuid + import datajoint as dj import numpy as np @@ -183,15 +185,7 @@ def make(self, key): dict( artifact_times=artifact_times, artifact_removed_valid_times=artifact_removed_valid_times, - # name for no-artifact time name using recording id - artifact_removed_interval_list_name="_".join( - [ - key["nwb_file_name"], - key["target_interval_list_name"], - "LFP", - key["artifact_params_name"], - ] - ), + artifact_removed_interval_list_name=uuid.uuid4(), ) ) @@ -199,11 +193,11 @@ def make(self, key): "nwb_file_name": key["nwb_file_name"], "interval_list_name": key["artifact_removed_interval_list_name"], "valid_times": key["artifact_removed_valid_times"], - "pipeline": "lfp_artifact", + "pipeline": self.full_table_name, } - LFPArtifactRemovedIntervalList.insert1(key, replace=True) - IntervalList.insert1(interval_key, replace=True) + LFPArtifactRemovedIntervalList.insert1(key) + IntervalList.insert1(interval_key) self.insert1(key) From d4f61af61ac4a360d06cc16f91f4a6bdf7e3d497 Mon Sep 17 00:00:00 2001 From: Kyu Hyun Lee Date: Mon, 24 Jun 2024 20:14:07 -0700 Subject: [PATCH 55/60] Fix artifact `list_triggers` (#1009) * Fix artifact list_triggers * Black * Update changelog --- CHANGELOG.md | 2 ++ src/spyglass/spikesorting/v1/sorting.py | 16 +++++++--------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e6c8640c..e61c612b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,8 @@ #994 - Update docstrings #996 - Remove unused `UnitInclusionParameters` table from `spikesorting.v0` #1003 + - Fix bug in identification of artifact samples to be zeroed out in `spikesorting.v1.SpikeSorting` #1009 + ## [0.5.2] (April 22, 2024) diff --git a/src/spyglass/spikesorting/v1/sorting.py b/src/spyglass/spikesorting/v1/sorting.py index 84a936eea..f738c5ff0 100644 --- a/src/spyglass/spikesorting/v1/sorting.py +++ b/src/spyglass/spikesorting/v1/sorting.py @@ -171,15 +171,15 @@ def make(self, key: dict): sorter, sorter_params = ( SpikeSorterParameters * SpikeSortingSelection & key ).fetch1("sorter", "sorter_params") + recording_analysis_nwb_file_abs_path = AnalysisNwbfile.get_abs_path( + recording_key["analysis_file_name"] + ) # DO: # - load recording # - concatenate artifact removed intervals # - run spike sorting # - save output to NWB file - recording_analysis_nwb_file_abs_path = AnalysisNwbfile.get_abs_path( - recording_key["analysis_file_name"] - ) recording = se.read_nwb_recording( recording_analysis_nwb_file_abs_path, load_time_vector=True ) @@ -200,7 +200,7 @@ def make(self, key: dict): list_triggers = [] if artifact_removed_intervals_ind[0][0] > 0: list_triggers.append( - np.array([0, artifact_removed_intervals_ind[0][0]]) + np.arange(0, artifact_removed_intervals_ind[0][0]) ) for interval_ind in range(len(artifact_removed_intervals_ind) - 1): list_triggers.append( @@ -211,11 +211,9 @@ def make(self, key: dict): ) if artifact_removed_intervals_ind[-1][1] < len(timestamps): list_triggers.append( - np.array( - [ - artifact_removed_intervals_ind[-1][1], - 
len(timestamps) - 1, - ] + np.arange( + artifact_removed_intervals_ind[-1][1], + len(timestamps) - 1, ) ) From b5ba05a2f25e16e72551d2b9fa59df13180e392f Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Tue, 25 Jun 2024 11:20:42 -0700 Subject: [PATCH 56/60] remove problem key in DLCPosV1 fetch_nwb attrs (#1011) * remove problem key in DLCPosV1 fetch_nwb attrs * update changelog --- CHANGELOG.md | 2 +- src/spyglass/position/v1/position_dlc_selection.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e61c612b4..17adc0a97 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ - Raise `KeyError` for missing input parameters across helper funcs #966 - `DLCPosVideo` table now inserts into self after `make` #966 - Remove unused `PositionVideoSelection` and `PositionVideo` tables #1003 + - Fix SQL query error in `DLCPosV1.fetch_nwb` #1011 - Spikesorting - Allow user to set smoothing timescale in `SortedSpikesGroup.get_firing_rate` #994 @@ -43,7 +44,6 @@ - Remove unused `UnitInclusionParameters` table from `spikesorting.v0` #1003 - Fix bug in identification of artifact samples to be zeroed out in `spikesorting.v1.SpikeSorting` #1009 - ## [0.5.2] (April 22, 2024) ### Infrastructure diff --git a/src/spyglass/position/v1/position_dlc_selection.py b/src/spyglass/position/v1/position_dlc_selection.py index 02692ce14..8a283bb1d 100644 --- a/src/spyglass/position/v1/position_dlc_selection.py +++ b/src/spyglass/position/v1/position_dlc_selection.py @@ -180,6 +180,10 @@ def fetch1_dataframe(self): index=index, ) + def fetch_nwb(self, **kwargs): + attrs = [a for a in self.heading.names if not a == "pose_eval_result"] + return super().fetch_nwb(*attrs, **kwargs) + @classmethod def evaluate_pose_estimation(cls, key): likelihood_thresh = [] From fc4116783caddf1aa2abeff547d91927bb64af5c Mon Sep 17 00:00:00 2001 From: Chris Broz Date: Wed, 26 Jun 2024 10:51:43 -0500 Subject: [PATCH 57/60] Tidy position (#870) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * WIP: pytests for common & lfp * WIP: pytests for utils 1 * ✅ : pytests for utils, position, linearization * Remove unnecessary lfp_band checks. Add deprecation warn on permission set * WIP: Tidy position 1 * WIP: Tidy position 2 * Spellcheck tests * Reduce pop_all_common redundancy * PosIntervalMap #849 * Logger decorator, unify make_video logic * Update changelog/requirements * Misc edits * Change deprecation warning * Video func name specificity * Revise centroid calc * Fix errors * Vectorize orient calc. 
Remove multitable stack warn * Revert blit --- .gitignore | 1 + CHANGELOG.md | 24 +- pyproject.toml | 1 + src/spyglass/common/common_behav.py | 118 +- src/spyglass/common/common_dandi.py | 2 + src/spyglass/common/common_device.py | 2 + src/spyglass/common/common_ephys.py | 10 +- src/spyglass/common/common_lab.py | 4 +- src/spyglass/common/common_position.py | 2 +- src/spyglass/common/common_session.py | 26 +- src/spyglass/common/common_usage.py | 19 + src/spyglass/common/populate_all_common.py | 4 + src/spyglass/data_import/insert_sessions.py | 3 +- src/spyglass/position/position_merge.py | 12 +- src/spyglass/position/v1/__init__.py | 4 +- src/spyglass/position/v1/dlc_decorators.py | 27 - src/spyglass/position/v1/dlc_reader.py | 2 + src/spyglass/position/v1/dlc_utils.py | 1258 ++++++----------- src/spyglass/position/v1/dlc_utils_makevid.py | 562 ++++++++ .../position/v1/position_dlc_centroid.py | 799 +++-------- .../position/v1/position_dlc_cohort.py | 82 +- .../position/v1/position_dlc_model.py | 60 +- .../position/v1/position_dlc_orient.py | 134 +- .../v1/position_dlc_pose_estimation.py | 355 +++-- .../position/v1/position_dlc_position.py | 291 ++-- .../position/v1/position_dlc_project.py | 412 +++--- .../position/v1/position_dlc_selection.py | 215 ++- .../position/v1/position_dlc_training.py | 286 ++-- .../position/v1/position_trodes_position.py | 252 +--- src/spyglass/utils/dj_helper_fn.py | 29 +- src/spyglass/utils/dj_merge_tables.py | 9 +- src/spyglass/utils/dj_mixin.py | 13 +- src/spyglass/utils/nwb_helper_fn.py | 12 +- tests/common/test_behav.py | 10 +- tests/common/test_device.py | 2 +- tests/common/test_ephys.py | 4 +- tests/common/test_insert.py | 30 +- tests/common/test_interval.py | 4 +- tests/common/test_interval_helpers.py | 2 +- tests/common/test_lab.py | 2 +- tests/common/test_region.py | 2 +- tests/common/test_session.py | 3 +- tests/conftest.py | 136 +- tests/container.py | 4 +- tests/data_downloader.py | 63 +- tests/lfp/test_lfp.py | 8 +- tests/position/conftest.py | 1 + tests/position/test_dlc_cent.py | 27 +- tests/position/test_dlc_model.py | 2 +- tests/position/test_dlc_pos_est.py | 4 +- tests/position/test_dlc_sel.py | 2 +- tests/position/test_dlc_train.py | 6 +- tests/position/test_trodes.py | 3 +- tests/utils/test_db_settings.py | 6 +- tests/utils/test_graph.py | 4 +- tests/utils/test_mixin.py | 4 +- tests/utils/test_nwb_helper_fn.py | 4 +- 57 files changed, 2424 insertions(+), 2939 deletions(-) delete mode 100644 src/spyglass/position/v1/dlc_decorators.py create mode 100644 src/spyglass/position/v1/dlc_utils_makevid.py diff --git a/.gitignore b/.gitignore index 6319e5f1c..1f18f4178 100644 --- a/.gitignore +++ b/.gitignore @@ -137,6 +137,7 @@ dmypy.json *.videoTimeStamps *.cameraHWSync *.stateScriptLog +tests/_data/* *.nwb *.DS_Store diff --git a/CHANGELOG.md b/CHANGELOG.md index 17adc0a97..69d2d0a87 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,12 @@ +```python +from spyglass.common.common_behav import PositionIntervalMap + +PositionIntervalMap.alter() +``` + ### Infrastructure - Create class `SpyglassGroupPart` to aid delete propagations #899 @@ -19,7 +25,8 @@ - Add pytests for position pipeline, various `test_mode` exceptions #966 - Migrate `pip` dependencies from `environment.yml`s to `pyproject.toml` #966 - Add documentation for common error messages #997 -- Allow mixin tables with parallelization in `make` to run populate with `processes > 1` #1001 +- Allow mixin tables with parallelization in `make` to run populate with + `processes > 1` #1001 ### 
Pipelines @@ -30,6 +37,9 @@ - Remove unused `ElectrodeBrainRegion` table #1003 - Files created by `AnalysisNwbfile.create()` receive new object_id #999, #1004 + - Remove redundant calls to tables in `populate_all_common` #870 + - Improve logging clarity in `populate_all_common` #870 + - `PositionIntervalMap` now inserts null entries for missing intervals #870 - Decoding: Default values for classes on `ImportError` #966 - Position - Allow dlc without pre-existing tracking data #973, #975 @@ -37,12 +47,18 @@ - `DLCPosVideo` table now inserts into self after `make` #966 - Remove unused `PositionVideoSelection` and `PositionVideo` tables #1003 - Fix SQL query error in `DLCPosV1.fetch_nwb` #1011 + - Add keyword args to all calls of `convert_to_pixels` #870 + - Unify `make_video` logic across `DLCPosVideo` and `TrodesVideo` #870 + - Replace `OutputLogger` context manager with decorator #870 + - Rename `check_videofile` -> `find_mp4` and `get_video_path` -> + `get_video_info` to reflect actual use #870 - Spikesorting - Allow user to set smoothing timescale in `SortedSpikesGroup.get_firing_rate` #994 - Update docstrings #996 - Remove unused `UnitInclusionParameters` table from `spikesorting.v0` #1003 - - Fix bug in identification of artifact samples to be zeroed out in `spikesorting.v1.SpikeSorting` #1009 + - Fix bug in identification of artifact samples to be zeroed out in + `spikesorting.v1.SpikeSorting` #1009 ## [0.5.2] (April 22, 2024) @@ -86,11 +102,15 @@ ### Pipelines +- Common: + - Add ActivityLog to `common_usage` to track unreferenced utilities. #870 - Position: - Fixes to `environment-dlc.yml` restricting tensortflow #834 - Video restriction for multicamera epochs #834 - Fixes to `_convert_mp4` #834 - Replace deprecated calls to `yaml.safe_load()` #834 + - Refactoring to reduce redundancy #870 + - Migrate `OutputLogger` behavior to decorator #870 - Spikesorting: - Increase`spikeinterface` version to >=0.99.1, \<0.100 #852 - Bug fix in single artifact interval edge case #859 diff --git a/pyproject.toml b/pyproject.toml index 78d189b73..2538b00dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -72,6 +72,7 @@ test = [ "kachery", # database access "kachery-client", "kachery-cloud>=0.4.0", + "opencv-python-headless", # for headless testing of Qt "pre-commit", # linting "pytest", # unit testing "pytest-cov", # code coverage diff --git a/src/spyglass/common/common_behav.py b/src/spyglass/common/common_behav.py index 67e6e35d9..a1397769b 100644 --- a/src/spyglass/common/common_behav.py +++ b/src/spyglass/common/common_behav.py @@ -14,7 +14,7 @@ from spyglass.common.common_nwbfile import Nwbfile from spyglass.common.common_session import Session # noqa: F401 from spyglass.common.common_task import TaskEpoch -from spyglass.settings import video_dir +from spyglass.settings import test_mode, video_dir from spyglass.utils import SpyglassMixin, logger from spyglass.utils.nwb_helper_fn import ( get_all_spatial_series, @@ -56,8 +56,8 @@ def make(self, keys: Union[List[Dict], dj.Table]): keys = [keys] if isinstance(keys[0], (dj.Table, dj.expression.QueryExpression)): keys = [k for tbl in keys for k in tbl.fetch("KEY", as_dict=True)] - for key in keys: - nwb_file_name = key.get("nwb_file_name") + nwb_files = set(key.get("nwb_file_name") for key in keys) + for nwb_file_name in nwb_files: # Only unique nwb files if not nwb_file_name: raise ValueError("PositionSource.make requires nwb_file_name") self.insert_from_nwbfile(nwb_file_name, skip_duplicates=True) @@ -311,7 +311,7 @@ def make(self, key): if 
associated_files is None: logger.info( "Unable to import StateScriptFile: no processing module named " - + '"associated_files" found in {nwb_file_name}.' + + f'"associated_files" found in {nwb_file_name}.' ) return # See #849 @@ -377,10 +377,12 @@ class VideoFile(SpyglassMixin, dj.Imported): def make(self, key): self._no_transaction_make(key) - def _no_transaction_make(self, key, verbose=True): + def _no_transaction_make(self, key, verbose=True, skip_duplicates=False): if not self.connection.in_transaction: self.populate(key) return + if test_mode: + skip_duplicates = True nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) @@ -404,6 +406,7 @@ def _no_transaction_make(self, key, verbose=True): "interval_list_name": interval_list_name, } ).fetch1("valid_times") + cam_device_str = r"camera_device (\d+)" is_found = False for ind, video in enumerate(videos.values()): @@ -413,28 +416,35 @@ def _no_transaction_make(self, key, verbose=True): # check to see if the times for this video_object are largely # overlapping with the task epoch times - if len( + if not len( interval_list_contains(valid_times, video_obj.timestamps) > 0.9 * len(video_obj.timestamps) ): - nwb_cam_device = video_obj.device.name - # returns whatever was captured in the first group (within the parentheses) of the regular expression -- in this case, 0 - key["video_file_num"] = int( - re.match(cam_device_str, nwb_cam_device)[1] - ) - camera_name = video_obj.device.camera_name - if CameraDevice & {"camera_name": camera_name}: - key["camera_name"] = video_obj.device.camera_name - else: - raise KeyError( - f"No camera with camera_name: {camera_name} found " - + "in CameraDevice table." - ) - key["video_file_object_id"] = video_obj.object_id - self.insert1( - key, skip_duplicates=True, allow_direct_insert=True + continue + + nwb_cam_device = video_obj.device.name + + # returns whatever was captured in the first group (within the + # parentheses) of the regular expression - in this case, 0 + + key["video_file_num"] = int( + re.match(cam_device_str, nwb_cam_device)[1] + ) + camera_name = video_obj.device.camera_name + if CameraDevice & {"camera_name": camera_name}: + key["camera_name"] = video_obj.device.camera_name + else: + raise KeyError( + f"No camera with camera_name: {camera_name} found " + + "in CameraDevice table." 
) - is_found = True + key["video_file_object_id"] = video_obj.object_id + self.insert1( + key, + skip_duplicates=skip_duplicates, + allow_direct_insert=True, + ) + is_found = True if not is_found and verbose: logger.info( @@ -443,7 +453,7 @@ def _no_transaction_make(self, key, verbose=True): ) @classmethod - def update_entries(cls, restrict={}): + def update_entries(cls, restrict=True): existing_entries = (cls & restrict).fetch("KEY") for row in existing_entries: if (cls & row).fetch1("camera_name"): @@ -495,9 +505,11 @@ class PositionIntervalMap(SpyglassMixin, dj.Computed): definition = """ -> IntervalList --- - position_interval_name: varchar(200) # name of the corresponding position interval + position_interval_name="": varchar(200) # name of the corresponding interval """ + # #849 - Insert null to avoid rerun + def make(self, key): self._no_transaction_make(key) @@ -510,6 +522,8 @@ def _no_transaction_make(self, key): # if not called in the context of a make function, call its own make function self.populate(key) return + if self & key: + return # *** HARD CODED VALUES *** EPSILON = 0.51 # tolerated time diff in bounds across epoch/pos @@ -517,11 +531,13 @@ def _no_transaction_make(self, key): nwb_file_name = key["nwb_file_name"] pos_intervals = get_pos_interval_list_names(nwb_file_name) + null_key = dict(key, position_interval_name="") + insert_opts = dict(allow_direct_insert=True, skip_duplicates=True) # Skip populating if no pos interval list names if len(pos_intervals) == 0: - # TODO: Now that populate_all accept errors, raise here? logger.error(f"NO POS INTERVALS FOR {key}; {no_pop_msg}") + self.insert1(null_key, **insert_opts) return valid_times = (IntervalList & key).fetch1("valid_times") @@ -535,7 +551,6 @@ def _no_transaction_make(self, key): f"nwb_file_name='{nwb_file_name}' AND interval_list_name=" + "'{}'" ) for pos_interval in pos_intervals: - # cbroz: fetch1->fetch. fetch1 would fail w/o result pos_times = (IntervalList & restr.format(pos_interval)).fetch( "valid_times" ) @@ -558,16 +573,18 @@ def _no_transaction_make(self, key): # Check that each pos interval was matched to only one epoch if len(matching_pos_intervals) != 1: - # TODO: Now that populate_all accept errors, raise here? logger.warning( - f"Found {len(matching_pos_intervals)} pos intervals for {key}; " - + f"{no_pop_msg}\n{matching_pos_intervals}" + f"{no_pop_msg}. 
Found {len(matching_pos_intervals)} pos intervals for " + + f"\n\t{key}\n\tMatching intervals: {matching_pos_intervals}" ) + self.insert1(null_key, **insert_opts) return # Insert into table - key["position_interval_name"] = matching_pos_intervals[0] - self.insert1(key, skip_duplicates=True, allow_direct_insert=True) + self.insert1( + dict(key, position_interval_name=matching_pos_intervals[0]), + **insert_opts, + ) logger.info( "Populated PosIntervalMap for " + f'{nwb_file_name}, {key["interval_list_name"]}' @@ -609,19 +626,27 @@ def convert_epoch_interval_name_to_position_interval_name( ) pos_query = PositionIntervalMap & key + pos_str = "position_interval_name" - if len(pos_query) == 0: - if populate_missing: - PositionIntervalMap()._no_transaction_make(key) - pos_query = PositionIntervalMap & key + no_entries = len(pos_query) == 0 + null_entry = pos_query.fetch(pos_str)[0] == "" if len(pos_query) else False - if len(pos_query) == 0: + if populate_missing and (no_entries or null_entry): + if null_entry: + pos_query.delete(safemode=False) # no prompt + PositionIntervalMap()._no_transaction_make(key) + pos_query = PositionIntervalMap & key + + if pos_query.fetch(pos_str)[0] == "": logger.info(f"No position intervals found for {key}") return [] if len(pos_query) == 1: return pos_query.fetch1("position_interval_name") + else: + raise ValueError(f"Multiple intervals found for {key}: {pos_query}") + def get_interval_list_name_from_epoch(nwb_file_name: str, epoch: int) -> str: """Returns the interval list name for the given epoch. @@ -653,13 +678,12 @@ def get_interval_list_name_from_epoch(nwb_file_name: str, epoch: int) -> str: def populate_position_interval_map_session(nwb_file_name: str): - for interval_name in (TaskEpoch & {"nwb_file_name": nwb_file_name}).fetch( - "interval_list_name" - ): - with PositionIntervalMap._safe_context(): - PositionIntervalMap().make( - { - "nwb_file_name": nwb_file_name, - "interval_list_name": interval_name, - } - ) + # 1. remove redundancy in interval names + # 2. 
let PositionIntervalMap handle transaction context + nwb_dict = dict(nwb_file_name=nwb_file_name) + intervals = (TaskEpoch & nwb_dict).fetch("interval_list_name") + for interval_name in set(intervals): + interval_dict = dict(interval_list_name=interval_name) + if PositionIntervalMap & interval_dict: + continue + PositionIntervalMap().make(dict(nwb_dict, **interval_dict)) diff --git a/src/spyglass/common/common_dandi.py b/src/spyglass/common/common_dandi.py index e3c2836e1..8264de4cb 100644 --- a/src/spyglass/common/common_dandi.py +++ b/src/spyglass/common/common_dandi.py @@ -8,6 +8,8 @@ import pynwb from fsspec.implementations.cached import CachingFileSystem +from spyglass.utils import logger + try: import dandi.download import dandi.organize diff --git a/src/spyglass/common/common_device.py b/src/spyglass/common/common_device.py index 96fa11d44..b2e764b24 100644 --- a/src/spyglass/common/common_device.py +++ b/src/spyglass/common/common_device.py @@ -703,6 +703,8 @@ def prompt_insert( if table_type: table_type += " " + else: + table_type = "" logger.info( f"{table}{table_type} '{name}' was not found in the" diff --git a/src/spyglass/common/common_ephys.py b/src/spyglass/common/common_ephys.py index 7e394bd2d..146efeea1 100644 --- a/src/spyglass/common/common_ephys.py +++ b/src/spyglass/common/common_ephys.py @@ -98,14 +98,10 @@ class Electrode(SpyglassMixin, dj.Imported): """ def make(self, key): - """Make without transaction - - Allows populate_all_common to work within a single transaction.""" - nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) - config = get_config(nwb_file_abspath) + config = get_config(nwb_file_abspath, calling_table=self.camel_name) if "Electrode" in config: electrode_config_dicts = { @@ -202,7 +198,7 @@ def create_from_config(cls, nwb_file_name: str): """ nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) - config = get_config(nwb_file_abspath) + config = get_config(nwb_file_abspath, calling_table=cls.__name__) if "Electrode" not in config: return # See #849 @@ -323,7 +319,7 @@ def make(self, key): # same nwb_object_id logger.info( - f'Importing raw data: Sampling rate:\t{key["sampling_rate"]} Hz\n' + f'Importing raw data: Sampling rate:\t{key["sampling_rate"]} Hz\n\t' + f'Number of valid intervals:\t{len(interval_dict["valid_times"])}' ) diff --git a/src/spyglass/common/common_lab.py b/src/spyglass/common/common_lab.py index bd8a90262..5958a4de9 100644 --- a/src/spyglass/common/common_lab.py +++ b/src/spyglass/common/common_lab.py @@ -213,8 +213,8 @@ def create_new_team( ) if not query: logger.info( - f"Please add the Google user ID for {team_member} in " - + "LabMember.LabMemberInfo to help manage permissions." 
+ "To help manage permissions in LabMemberInfo, please add Google " + + f"user ID for {team_member}" ) labteammember_dict = { "team_name": team_name, diff --git a/src/spyglass/common/common_position.py b/src/spyglass/common/common_position.py index ed91aa463..f94cfff67 100644 --- a/src/spyglass/common/common_position.py +++ b/src/spyglass/common/common_position.py @@ -705,7 +705,7 @@ def make_video( head_position = head_position_mean[time_ind] head_position = self.convert_to_pixels( - head_position, frame_size, cm_to_pixels + data=head_position, cm_to_pixels=cm_to_pixels ) head_orientation = head_orientation_mean[time_ind] diff --git a/src/spyglass/common/common_session.py b/src/spyglass/common/common_session.py index b8139939a..186562444 100644 --- a/src/spyglass/common/common_session.py +++ b/src/spyglass/common/common_session.py @@ -64,7 +64,7 @@ def make(self, key): nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) - config = get_config(nwb_file_abspath) + config = get_config(nwb_file_abspath, calling_table=self.camel_name) # certain data are not associated with a single NWB file / session # because they may apply to multiple sessions. these data go into @@ -77,26 +77,26 @@ def make(self, key): # via fields of Session (e.g., Subject, Institution, Lab) or part # tables (e.g., Experimenter, DataAcquisitionDevice). - logger.info("Institution...") + logger.info("Session populates Institution...") Institution().insert_from_nwbfile(nwbf) - logger.info("Lab...") + logger.info("Session populates Lab...") Lab().insert_from_nwbfile(nwbf) - logger.info("LabMember...") + logger.info("Session populates LabMember...") LabMember().insert_from_nwbfile(nwbf) - logger.info("Subject...") + logger.info("Session populates Subject...") Subject().insert_from_nwbfile(nwbf) if not debug_mode: # TODO: remove when demo files agree on device - logger.info("Populate DataAcquisitionDevice...") + logger.info("Session populates DataAcquisitionDevice...") DataAcquisitionDevice.insert_from_nwbfile(nwbf, config) - logger.info("Populate CameraDevice...") + logger.info("Session populates CameraDevice...") CameraDevice.insert_from_nwbfile(nwbf) - logger.info("Populate Probe...") + logger.info("Session populates Probe...") Probe.insert_from_nwbfile(nwbf, config) if nwbf.subject is not None: @@ -126,7 +126,7 @@ def make(self, key): # interval lists depend on Session (as a primary key) but users may want to add these manually so this is # a manual table that is also populated from NWB files - logger.info("IntervalList...") + logger.info("Session populates IntervalList...") IntervalList().insert_from_nwbfile(nwbf, nwb_file_name=nwb_file_name) # logger.info('Unit...') @@ -148,8 +148,8 @@ def _add_data_acquisition_device_part(self, nwb_file_name, nwbf, config): } if len(query) == 0: logger.warn( - f"DataAcquisitionDevice with name {device_name} does not exist. " - "Cannot link Session with DataAcquisitionDevice in Session.DataAcquisitionDevice." + "Cannot link Session with DataAcquisitionDevice.\n" + + f"DataAcquisitionDevice does not exist: {device_name}" ) continue key = dict() @@ -166,8 +166,8 @@ def _add_experimenter_part(self, nwb_file_name, nwbf): query = LabMember & {"lab_member_name": name} if len(query) == 0: logger.warn( - f"LabMember with name {name} does not exist. " - "Cannot link Session with LabMember in Session.Experimenter." + "Cannot link Session with LabMember. 
" + + f"LabMember does not exist: {name}" ) continue diff --git a/src/spyglass/common/common_usage.py b/src/spyglass/common/common_usage.py index 9d408b5bc..6616fedf6 100644 --- a/src/spyglass/common/common_usage.py +++ b/src/spyglass/common/common_usage.py @@ -55,6 +55,25 @@ class InsertError(dj.Manual): """ +@schema +class ActivityLog(dj.Manual): + """A log of suspected low-use features worth deprecating.""" + + definition = """ + id: int auto_increment + --- + function: varchar(64) + dj_user: varchar(64) + timestamp=CURRENT_TIMESTAMP: timestamp + """ + + @classmethod + def deprecate_log(cls, name, warning=True) -> None: + if warning: + logger.warning(f"DEPRECATION scheduled for version 0.6: {name}") + cls.insert1(dict(dj_user=dj.config["database.user"], function=name)) + + @schema class ExportSelection(SpyglassMixin, dj.Manual): definition = """ diff --git a/src/spyglass/common/populate_all_common.py b/src/spyglass/common/populate_all_common.py index e78b68de1..d3b0d7f62 100644 --- a/src/spyglass/common/populate_all_common.py +++ b/src/spyglass/common/populate_all_common.py @@ -79,6 +79,10 @@ def single_transaction_make( for parent in parents[1:]: key_source *= parent.proj() + if table.__name__ == "PositionSource": + # PositionSource only uses nwb_file_name - full calls redundant + key_source = dj.U("nwb_file_name") & key_source + for pop_key in (key_source & file_restr).fetch("KEY"): try: table().make(pop_key) diff --git a/src/spyglass/data_import/insert_sessions.py b/src/spyglass/data_import/insert_sessions.py index a5d539e8e..b4dc1d406 100644 --- a/src/spyglass/data_import/insert_sessions.py +++ b/src/spyglass/data_import/insert_sessions.py @@ -114,8 +114,7 @@ def copy_nwb_link_raw_ephys(nwb_file_name, out_nwb_file_name): if debug_mode: return out_nwb_file_abs_path logger.warning( - f"Output file {out_nwb_file_abs_path} exists and will be " - + "overwritten." 
+ f"Output file exists, will be overwritten: {out_nwb_file_abs_path}" ) with pynwb.NWBHDF5IO( diff --git a/src/spyglass/position/position_merge.py b/src/spyglass/position/position_merge.py index ea2d574a2..b6346b938 100644 --- a/src/spyglass/position/position_merge.py +++ b/src/spyglass/position/position_merge.py @@ -1,20 +1,10 @@ import datajoint as dj -import numpy as np -import pandas as pd from datajoint.utils import to_camel_case from spyglass.common.common_position import IntervalPositionInfo as CommonPos -from spyglass.position.v1.dlc_utils import ( - check_videofile, - get_video_path, - make_video, -) -from spyglass.position.v1.position_dlc_pose_estimation import ( - DLCPoseEstimationSelection, -) from spyglass.position.v1.position_dlc_selection import DLCPosV1 from spyglass.position.v1.position_trodes_position import TrodesPosV1 -from spyglass.utils import SpyglassMixin, _Merge, logger +from spyglass.utils import SpyglassMixin, _Merge schema = dj.schema("position_merge") diff --git a/src/spyglass/position/v1/__init__.py b/src/spyglass/position/v1/__init__.py index 20f4cf071..9fc821cb6 100644 --- a/src/spyglass/position/v1/__init__.py +++ b/src/spyglass/position/v1/__init__.py @@ -1,12 +1,12 @@ from .dlc_reader import do_pose_estimation, read_yaml, save_yaml from .dlc_utils import ( _convert_mp4, - check_videofile, find_full_path, + find_mp4, find_root_directory, get_dlc_processed_data_dir, get_dlc_root_data_dir, - get_video_path, + get_video_info, ) from .position_dlc_centroid import ( DLCCentroid, diff --git a/src/spyglass/position/v1/dlc_decorators.py b/src/spyglass/position/v1/dlc_decorators.py deleted file mode 100644 index 111d7508a..000000000 --- a/src/spyglass/position/v1/dlc_decorators.py +++ /dev/null @@ -1,27 +0,0 @@ -## dlc_decorators - - -def accepts(*vals, **kwargs): - is_method = kwargs.pop("is_method", True) - - def check_accepts(f): - if is_method: - assert len(vals) == f.__code__.co_argcount - 1 - else: - assert len(vals) == f.__code__.co_argcount - - def new_f(*args, **kwargs): - pargs = args[1:] if is_method else args - for a, t in zip(pargs, vals): # assume first arg is self or cls - if t is None: - continue - assert a in t, "arg %r is not in %s" % (a, t) - if is_method: - return f(args[0], *pargs, **kwargs) - else: - return f(*args, **kwargs) - - new_f.__name__ = f.__name__ - return new_f - - return check_accepts diff --git a/src/spyglass/position/v1/dlc_reader.py b/src/spyglass/position/v1/dlc_reader.py index caa3c2e5c..c3d8e8af8 100644 --- a/src/spyglass/position/v1/dlc_reader.py +++ b/src/spyglass/position/v1/dlc_reader.py @@ -8,6 +8,7 @@ import pandas as pd import ruamel.yaml as yaml +from spyglass.common.common_usage import ActivityLog from spyglass.settings import test_mode @@ -20,6 +21,7 @@ def __init__( yml_path=None, filename_prefix="", ): + ActivityLog.deprecate_log("dlc_reader: PoseEstimation") if dlc_dir is None: assert pkl_path and h5_path and yml_path, ( 'If "dlc_dir" is not provided, then pkl_path, h5_path, and yml_path ' diff --git a/src/spyglass/position/v1/dlc_utils.py b/src/spyglass/position/v1/dlc_utils.py index 6d27615e4..1523e01b4 100644 --- a/src/spyglass/position/v1/dlc_utils.py +++ b/src/spyglass/position/v1/dlc_utils.py @@ -3,25 +3,25 @@ import grp import logging import os -import pathlib import pwd import subprocess import sys from collections import abc -from contextlib import redirect_stdout -from itertools import groupby +from functools import reduce +from itertools import combinations, groupby from operator import itemgetter 
+from pathlib import Path, PosixPath from typing import Iterable, Union import datajoint as dj -import matplotlib.pyplot as plt import numpy as np import pandas as pd -from tqdm import tqdm as tqdm +from position_tools import get_distance from spyglass.common.common_behav import VideoFile -from spyglass.settings import dlc_output_dir, dlc_video_dir, raw_dir, test_mode -from spyglass.utils import logger +from spyglass.common.common_usage import ActivityLog +from spyglass.settings import dlc_output_dir, dlc_video_dir, raw_dir +from spyglass.utils.logging import logger, stream_handler def validate_option( @@ -111,6 +111,7 @@ def validate_smooth_params(params): if not params.get("smooth"): return smoothing_params = params.get("smoothing_params") + validate_option(option=smoothing_params, name="smoothing_params") validate_option( option=smoothing_params.get("smooth_method"), name="smooth_method", @@ -145,8 +146,9 @@ def _set_permissions(directory, mode, username: str, groupname: str = None): ------- None """ + ActivityLog().deprecate_log("dlc_utils: _set_permissions") - directory = pathlib.Path(directory) + directory = Path(directory) assert directory.exists(), f"Target directory: {directory} does not exist" uid = pwd.getpwnam(username).pw_uid if groupname: @@ -161,135 +163,52 @@ def _set_permissions(directory, mode, username: str, groupname: str = None): os.chmod(os.path.join(dirpath, filename), mode) -class OutputLogger: # TODO: migrate to spyglass.utils.logger - """ - A class to wrap a logging.Logger object in order to provide context manager capabilities. - - This class uses contextlib.redirect_stdout to temporarily redirect sys.stdout and thus - print statements to the log file instead of, or as well as the console. +def file_log(logger, console=False): + """Decorator to add a file handler to a logger. - Attributes + Parameters ---------- logger : logging.Logger - logger object - name : str - name of logger - level : int - level of logging that the logger is set to handle + Logger to add file handler to. + console : bool, optional + If True, logged info will also be printed to console. Default False. - Methods + Example ------- - setup_logger(name_logfile, path_logfile, print_console=False) - initialize or get logger object with name_logfile - that writes to path_logfile - - Examples - -------- - >>> with OutputLogger(name, path, print_console=True) as logger: - ... print("this will print to logfile") - ... logger.logger.info("this will log to the logfile") - ... print("this will print to the console") - ... logger.logger.info("this will log to the logfile") - + @file_log(logger, console=True) + def func(self, *args, **kwargs): + pass """ - def __init__(self, name, path, level="INFO", **kwargs): - self.logger = self.setup_logger(name, path, **kwargs) - self.name = self.logger.name - self.level = 30 if test_mode else getattr(logging, level) - - def setup_logger( - self, name_logfile, path_logfile, print_console=False - ) -> logging.Logger: - """ - Sets up a logger for that outputs to a file, and optionally, the console - - Parameters - ---------- - name_logfile : str - name of the logfile to use - path_logfile : str - path to the file that should be used as the file handler - print_console : bool, default-False - if True, prints to console as well as log file. 
- - Returns - ------- - logger : logging.Logger - the logger object with specified handlers - """ - - logger = logging.getLogger(name_logfile) - # check to see if handlers already exist for this logger - if logger.handlers: - for handler in logger.handlers: - # if it's a file handler - # type is used instead of isinstance, - # which doesn't work properly with logging.StreamHandler - if type(handler) == logging.FileHandler: - # if paths don't match, change file handler path - if not os.path.samefile(handler.baseFilename, path_logfile): - handler.close() - logger.removeHandler(handler) - file_handler = self._get_file_handler(path_logfile) - logger.addHandler(file_handler) - # if a stream handler exists and - # if print_console is False remove streamHandler - if type(handler) == logging.StreamHandler: - if not print_console: - handler.close() - logger.removeHandler(handler) - if print_console and not any( - type(handler) == logging.StreamHandler - for handler in logger.handlers - ): - logger.addHandler(self._get_stream_handler()) - - else: - file_handler = self._get_file_handler(path_logfile) + def decorator(func): + def wrapper(self, *args, **kwargs): + if not (log_path := getattr(self, "log_path", None)): + self.log_path = f"temp_{self.__class__.__name__}.log" + file_handler = logging.FileHandler(log_path, mode="a") + file_fmt = logging.Formatter( + "[%(asctime)s][%(levelname)s] Spyglass " + + "%(filename)s:%(lineno)d: %(message)s", + datefmt="%y-%m-%d %H:%M:%S", + ) + file_handler.setFormatter(file_fmt) logger.addHandler(file_handler) - if print_console: - logger.addHandler(self._get_stream_handler()) - logger.setLevel(logging.INFO) - return logger - - def _get_file_handler(self, path): - output_dir = pathlib.Path(os.path.dirname(path)) - if not os.path.exists(output_dir): - output_dir.mkdir(parents=True, exist_ok=True) - file_handler = logging.FileHandler(path, mode="a") - file_handler.setFormatter(self._get_formatter()) - return file_handler - - def _get_stream_handler(self): - stream_handler = logging.StreamHandler() - stream_handler.setFormatter(self._get_formatter()) - return stream_handler - - def _get_formatter(self): - return logging.Formatter( - "[%(asctime)s] in %(pathname)s, line %(lineno)d: %(message)s", - datefmt="%d-%b-%y %H:%M:%S", - ) - - def write(self, msg): - if msg and not msg.isspace(): - self.logger.log(self.level, msg) - - def flush(self): - pass + if not console: + logger.removeHandler(logger.handlers[0]) + try: + return func(self, *args, **kwargs) + finally: + if not console: + logger.addHandler(stream_handler) + logger.removeHandler(file_handler) + file_handler.close() - def __enter__(self): - self._redirector = redirect_stdout(self) - self._redirector.__enter__() - return self + return wrapper - def __exit__(self, exc_type, exc_value, traceback): - # let contextlib do any exception handling here - self._redirector.__exit__(exc_type, exc_value, traceback) + return decorator def get_dlc_root_data_dir(): + ActivityLog().deprecate_log("dlc_utils: get_dlc_root_data_dir") if "custom" in dj.config: if "dlc_root_data_dir" in dj.config["custom"]: dlc_root_dirs = dj.config.get("custom", {}).get("dlc_root_data_dir") @@ -307,13 +226,14 @@ def get_dlc_root_data_dir(): def get_dlc_processed_data_dir() -> str: """Returns session_dir relative to custom 'dlc_output_dir' root""" + ActivityLog().deprecate_log("dlc_utils: get_dlc_processed_data_dir") if "custom" in dj.config: if "dlc_output_dir" in dj.config["custom"]: dlc_output_dir = dj.config.get("custom", 
{}).get("dlc_output_dir") if dlc_output_dir: - return pathlib.Path(dlc_output_dir) + return Path(dlc_output_dir) else: - return pathlib.Path("/nimbus/deeplabcut/output/") + return Path("/nimbus/deeplabcut/output/") def find_full_path(root_directories, relative_path): @@ -323,15 +243,16 @@ def find_full_path(root_directories, relative_path): from provided potential root directories (in the given order) :param root_directories: potential root directories :param relative_path: the relative path to find the valid root directory - :return: full-path (pathlib.Path object) + :return: full-path (Path object) """ + ActivityLog().deprecate_log("dlc_utils: find_full_path") relative_path = _to_Path(relative_path) if relative_path.exists(): return relative_path # Turn to list if only a single root directory is provided - if isinstance(root_directories, (str, pathlib.Path)): + if isinstance(root_directories, (str, Path)): root_directories = [_to_Path(root_directories)] for root_dir in root_directories: @@ -351,15 +272,16 @@ def find_root_directory(root_directories, full_path): search and return one directory that is the parent of the given path :param root_directories: potential root directories :param full_path: the full path to search the root directory - :return: root_directory (pathlib.Path object) + :return: root_directory (Path object) """ + ActivityLog().deprecate_log("dlc_utils: find_full_path") full_path = _to_Path(full_path) if not full_path.exists(): raise FileNotFoundError(f"{full_path} does not exist!") # Turn to list if only a single root directory is provided - if isinstance(root_directories, (str, pathlib.Path)): + if isinstance(root_directories, (str, Path)): root_directories = [_to_Path(root_directories)] try: @@ -383,8 +305,6 @@ def infer_output_dir(key, makedir=True): ---------- key: DataJoint key specifying a pairing of VideoFile and Model. """ - # TODO: add check to make sure interval_list_name refers to a single epoch - # Or make key include epoch in and of itself instead of interval_list_name file_name = key.get("nwb_file_name") dlc_model_name = key.get("dlc_model_name") @@ -395,29 +315,29 @@ def infer_output_dir(key, makedir=True): "Key must contain 'nwb_file_name', 'dlc_model_name', and 'epoch'" ) - nwb_file_name = file_name.split("_.")[0] - output_dir = pathlib.Path(dlc_output_dir) / pathlib.Path( + nwb_file_name = key["nwb_file_name"].split("_.")[0] + output_dir = Path(dlc_output_dir) / Path( f"{nwb_file_name}/{nwb_file_name}_{key['epoch']:02}" f"_model_" + key["dlc_model_name"].replace(" ", "-") ) - if makedir is True: - if not os.path.exists(output_dir): - output_dir.mkdir(parents=True, exist_ok=True) + if makedir: + output_dir.mkdir(parents=True, exist_ok=True) return output_dir def _to_Path(path): """ - Convert the input "path" into a pathlib.Path object + Convert the input "path" into a Path object Handles one odd Windows/Linux incompatibility of the "\\" """ - return pathlib.Path(str(path).replace("\\", "/")) + return Path(str(path).replace("\\", "/")) -def get_video_path(key): - """ +def get_video_info(key): + """Returns video path for a given key. + Given nwb_file_name and interval_list_name returns specified - video file filename and path + video file filename, path, meters_per_pixel, and timestamps. 
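A rough usage sketch with a hypothetical key (the function returns four values, or four `None`s when the video lookup is not unique):

# Sketch only: key values are made up for illustration.
video_dir, video_filename, meters_per_pixel, timestamps = get_video_info(
    {"nwb_file_name": "example20240101_.nwb", "epoch": 2}
)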
Parameters ---------- @@ -430,16 +350,21 @@ def get_video_path(key): path to the video file, including video filename video_filename : str filename of the video + meters_per_pixel : float + meters per pixel conversion factor + timestamps : np.array + timestamps of the video """ import pynwb - vf_key = {k: val for k, val in key.items() if k in VideoFile.heading.names} - if not VideoFile & vf_key: - VideoFile()._no_transaction_make(vf_key, verbose=False) + vf_key = {k: val for k, val in key.items() if k in VideoFile.heading} video_query = VideoFile & vf_key + if not video_query: + VideoFile()._no_transaction_make(vf_key, verbose=False) + if len(video_query) != 1: - print(f"Found {len(video_query)} videos for {vf_key}") + logger.warning(f"Found {len(video_query)} videos for {vf_key}") return None, None, None, None video_info = video_query.fetch1() @@ -457,15 +382,13 @@ def get_video_path(key): return video_dir, video_filename, meters_per_pixel, timestamps -def check_videofile( - video_path: Union[str, pathlib.PosixPath], - output_path: Union[str, pathlib.PosixPath] = dlc_video_dir, +def find_mp4( + video_path: Union[str, PosixPath], + output_path: Union[str, PosixPath] = dlc_video_dir, video_filename: str = None, video_filetype: str = "h264", ): - """ - Checks the file extension of a video file to make sure it is .mp4 for - DeepLabCut processes. Converts to MP4 if not already. + """Check for video file and convert to .mp4 if necessary. Parameters ---------- @@ -474,44 +397,50 @@ def check_videofile( output_path : str or PosixPath object path to directory where converted video will be saved video_filename : str, Optional - filename of the video to convert, if not provided, video_filetype must be - and all video files of video_filetype in the directory will be converted + filename of the video to convert, if not provided, video_filetype must + be and all video files of video_filetype in the directory will be + converted video_filetype : str or List, Default 'h264', Optional If video_filename is not provided, all videos of this filetype will be converted to .mp4 Returns ------- - output_files : List of PosixPath objects - paths to converted video file(s) + PosixPath object + path to converted video file """ - if not video_filename: - video_files = pathlib.Path(video_path).glob(f"*.{video_filetype}") - else: - video_files = [pathlib.Path(f"{video_path}/{video_filename}")] - output_files = [] - for video_filepath in video_files: - if video_filepath.exists(): - if video_filepath.suffix == ".mp4": - output_files.append(video_filepath) - continue - video_file = ( - video_filepath.as_posix() - .rsplit(video_filepath.parent.as_posix(), maxsplit=1)[-1] - .split("/")[-1] - ) - output_files.append( - _convert_mp4(video_file, video_path, output_path, videotype="mp4") + if not video_path or not Path(video_path).exists(): + raise FileNotFoundError(f"Video path does not exist: {video_path}") + + video_files = ( + [Path(video_path) / video_filename] + if video_filename + else Path(video_path).glob(f"*.{video_filetype}") + ) + + if len(video_files) != 1: + raise FileNotFoundError( + f"Found {len(video_files)} video files in {video_path}" ) - return output_files + video_filepath = video_files[0] + + if video_filepath.exists() and video_filepath.suffix == ".mp4": + return video_filepath + + video_file = ( + video_filepath.as_posix() + .rsplit(video_filepath.parent.as_posix(), maxsplit=1)[-1] + .split("/")[-1] + ) + return _convert_mp4(video_file, video_path, output_path, videotype="mp4") def _convert_mp4( 
filename: str, video_path: str, dest_path: str, - videotype: str, + videotype: str = "mp4", count_frames=False, return_output=True, ): @@ -531,107 +460,85 @@ def _convert_mp4( return_output: bool if True returns the destination filename """ + if videotype not in ["mp4"]: + raise NotImplementedError(f"videotype {videotype} not implemented") orig_filename = filename - video_path = pathlib.PurePath( - pathlib.Path(video_path), pathlib.Path(filename) - ) - if videotype not in ["mp4"]: - raise NotImplementedError + video_path = Path(video_path) / filename + dest_filename = os.path.splitext(filename)[0] if ".1" in dest_filename: dest_filename = os.path.splitext(dest_filename)[0] - dest_path = pathlib.Path(f"{dest_path}/{dest_filename}.{videotype}") - convert_command = [ - "ffmpeg", - "-vsync", - "passthrough", - "-i", - f"{video_path.as_posix()}", - "-codec", - "copy", - f"{dest_path.as_posix()}", - ] + dest_path = Path(f"{dest_path}/{dest_filename}.{videotype}") if dest_path.exists(): logger.info(f"{dest_path} already exists, skipping conversion") - else: - try: - sys.stdout.flush() - convert_process = subprocess.Popen( - convert_command, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - ) - except subprocess.CalledProcessError as err: - raise RuntimeError( - f"command {err.cmd} return with error (code {err.returncode}): {err.output}" - ) from err - out, _ = convert_process.communicate() - logger.info(out.decode("utf-8")) - logger.info(f"finished converting {filename}") - logger.info( - f"Checking that number of packets match between {orig_filename} and {dest_filename}" - ) - num_packets = [] - for file in [video_path, dest_path]: - packets_command = [ - "ffprobe", - "-v", - "error", - "-select_streams", - "v:0", - "-count_packets", - "-show_entries", - "stream=nb_read_packets", - "-of", - "csv=p=0", - file.as_posix(), - ] - frames_command = [ - "ffprobe", - "-v", - "error", - "-select_streams", - "v:0", - "-count_frames", - "-show_entries", - "stream=nb_read_frames", - "-of", - "csv=p=0", - file.as_posix(), - ] - if count_frames: - try: - check_process = subprocess.Popen( - frames_command, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - ) - except subprocess.CalledProcessError as err: - raise RuntimeError( - f"command {err.cmd} return with error (code {err.returncode}): {err.output}" - ) from err - else: - try: - check_process = subprocess.Popen( - packets_command, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - ) - except subprocess.CalledProcessError as err: - raise RuntimeError( - f"command {err.cmd} return with error (code {err.returncode}): {err.output}" - ) from err - out, _ = check_process.communicate() - num_packets.append(int(out.decode("utf-8").split("\n")[0])) - print( - f"Number of packets in {orig_filename}: {num_packets[0]}, {dest_filename}: {num_packets[1]}" - ) - assert num_packets[0] == num_packets[1] + return dest_path + + try: + sys.stdout.flush() + convert_process = subprocess.Popen( + [ + "ffmpeg", + "-vsync", + "passthrough", + "-i", + f"{video_path.as_posix()}", + "-codec", + "copy", + f"{dest_path.as_posix()}", + ], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + except subprocess.CalledProcessError as err: + raise RuntimeError( + f"Video convert errored: Code {err.returncode}, {err.output}" + ) from err + out, _ = convert_process.communicate() + logger.info(f"Finished converting {filename}") + + # check packets match orig file + logger.info(f"Checking packets match orig file: {dest_filename}") + orig_packets = 
_check_packets(video_path, count_frames=count_frames) + dest_packets = _check_packets(dest_path, count_frames=count_frames) + if orig_packets != dest_packets: + logger.warning(f"Conversion error: {orig_filename} -> {dest_filename}") + if return_output: return dest_path +def _check_packets(file, count_frames=False): + checked = "frames" if count_frames else "packets" + try: + check_process = subprocess.Popen( + [ + "ffprobe", + "-v", + "error", + "-select_streams", + "v:0", + f"-count_{checked}", + "-show_entries", + f"stream=nb_read_{checked}", + "-of", + "csv=p=0", + file.as_posix(), + ], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + except subprocess.CalledProcessError as err: + raise RuntimeError( + f"Check packets error: Code {err.returncode}, {err.output}" + ) from err + out, _ = check_process.communicate() + decoded_out = out.decode("utf-8").split("\n")[0] + if decoded_out.isnumeric(): + return int(decoded_out) + raise ValueError(f"Check packets error: {out}") + + def get_gpu_memory(): """Queries the gpu cluster and returns the memory use for each core. This is used to evaluate which GPU cores are available to run jobs on @@ -658,8 +565,7 @@ def output_to_list(x): )[1:] except subprocess.CalledProcessError as err: raise RuntimeError( - f"command {err.cmd} return with error (code {err.returncode}): " - + f"{err.output}" + f"Get GPU memory errored: Code {err.returncode}, {err.output}" ) from err memory_use_values = { i: int(x.split()[0]) for i, x in enumerate(memory_use_info) @@ -668,18 +574,6 @@ def output_to_list(x): def get_span_start_stop(indices): - """_summary_ - - Parameters - ---------- - indices : _type_ - _description_ - - Returns - ------- - _type_ - _description_ - """ span_inds = [] # Get start and stop index of spans of consecutive indices for k, g in groupby(enumerate(indices), lambda x: x[1] - x[0]): @@ -690,62 +584,83 @@ def get_span_start_stop(indices): def interp_pos(dlc_df, spans_to_interp, **kwargs): idx = pd.IndexSlice + + no_x_msg = "Index {ind} has no {coord}point with which to interpolate" + no_interp_msg = "Index {start} to {stop} not interpolated" + max_pts_to_interp = kwargs.get("max_pts_to_interp", float("inf")) + max_cm_to_interp = kwargs.get("max_cm_to_interp", float("inf")) + + def _get_new_dim(dim, span_start, span_stop, start_time, stop_time): + return np.interp( + x=dlc_df.index[span_start : span_stop + 1], + xp=[start_time, stop_time], + fp=[dim[0], dim[-1]], + ) + for ind, (span_start, span_stop) in enumerate(spans_to_interp): + idx_span = idx[span_start:span_stop] + if (span_stop + 1) >= len(dlc_df): - dlc_df.loc[idx[span_start:span_stop], idx["x"]] = np.nan - dlc_df.loc[idx[span_start:span_stop], idx["y"]] = np.nan - print(f"ind: {ind} has no endpoint with which to interpolate") + dlc_df.loc[idx_span, idx[["x", "y"]]] = np.nan + logger.info(no_x_msg.format(ind=ind, coord="end")) continue if span_start < 1: - dlc_df.loc[idx[span_start:span_stop], idx["x"]] = np.nan - dlc_df.loc[idx[span_start:span_stop], idx["y"]] = np.nan - print(f"ind: {ind} has no startpoint with which to interpolate") + dlc_df.loc[idx_span, idx[["x", "y"]]] = np.nan + logger.info(no_x_msg.format(ind=ind, coord="start")) continue + x = [dlc_df["x"].iloc[span_start - 1], dlc_df["x"].iloc[span_stop + 1]] y = [dlc_df["y"].iloc[span_start - 1], dlc_df["y"].iloc[span_stop + 1]] + span_len = int(span_stop - span_start + 1) start_time = dlc_df.index[span_start] stop_time = dlc_df.index[span_stop] - if "max_pts_to_interp" in kwargs: - if span_len > 
kwargs["max_pts_to_interp"]: - dlc_df.loc[idx[span_start:span_stop], idx["x"]] = np.nan - dlc_df.loc[idx[span_start:span_stop], idx["y"]] = np.nan - print( - f"inds {span_start} to {span_stop} " - f"length: {span_len} not interpolated" - ) - if "max_cm_to_interp" in kwargs: - if ( - np.linalg.norm(np.array([x[0], y[0]]) - np.array([x[1], y[1]])) - > kwargs["max_cm_to_interp"] - ): - dlc_df.loc[idx[start_time:stop_time], idx["x"]] = np.nan - dlc_df.loc[idx[start_time:stop_time], idx["y"]] = np.nan - change = np.linalg.norm( - np.array([x[0], y[0]]) - np.array([x[1], y[1]]) - ) - print( - f"inds {span_start} to {span_stop + 1} " - f"with change in position: {change:.2f} not interpolated" - ) + change = np.linalg.norm(np.array([x[0], y[0]]) - np.array([x[1], y[1]])) + + if span_len > max_pts_to_interp or change > max_cm_to_interp: + dlc_df.loc[idx_span, idx[["x", "y"]]] = np.nan + logger.info(no_interp_msg.format(start=span_start, stop=span_stop)) + if change > max_cm_to_interp: continue - xnew = np.interp( - x=dlc_df.index[span_start : span_stop + 1], - xp=[start_time, stop_time], - fp=[x[0], x[-1]], - ) - ynew = np.interp( - x=dlc_df.index[span_start : span_stop + 1], - xp=[start_time, stop_time], - fp=[y[0], y[-1]], - ) + xnew = _get_new_dim(x, span_start, span_stop, start_time, stop_time) + ynew = _get_new_dim(y, span_start, span_stop, start_time, stop_time) + dlc_df.loc[idx[start_time:stop_time], idx["x"]] = xnew dlc_df.loc[idx[start_time:stop_time], idx["y"]] = ynew return dlc_df +def interp_orientation(df, spans_to_interp, **kwargs): + idx = pd.IndexSlice + no_x_msg = "Index {ind} has no {x}point with which to interpolate" + df_orient = df["orientation"] + + for ind, (span_start, span_stop) in enumerate(spans_to_interp): + idx_span = idx[span_start:span_stop] + if (span_stop + 1) >= len(df): + df.loc[idx_span, idx["orientation"]] = np.nan + logger.info(no_x_msg.format(ind=ind, x="stop")) + continue + if span_start < 1: + df.loc[idx_span, idx["orientation"]] = np.nan + logger.info(no_x_msg.format(ind=ind, x="start")) + continue + + orient = [df_orient.iloc[span_start - 1], df_orient.iloc[span_stop + 1]] + + start_time = df.index[span_start] + stop_time = df.index[span_stop] + orientnew = np.interp( + x=df.index[span_start : span_stop + 1], + xp=[start_time, stop_time], + fp=[orient[0], orient[-1]], + ) + df.loc[idx[start_time:stop_time], idx["orientation"]] = orientnew + return df + + def smooth_moving_avg( interp_df, smoothing_duration: float, sampling_rate: int, **kwargs ): @@ -753,6 +668,7 @@ def smooth_moving_avg( idx = pd.IndexSlice moving_avg_window = int(np.round(smoothing_duration * sampling_rate)) + xy_arr = interp_df.loc[:, idx[("x", "y")]].values smoothed_xy_arr = bn.move_mean( xy_arr, window=moving_avg_window, axis=0, min_count=1 @@ -768,6 +684,71 @@ def smooth_moving_avg( } +def two_pt_head_orientation(pos_df: pd.DataFrame, **params): + """Determines orientation based on vector between two points""" + BP1 = params.pop("bodypart1", None) + BP2 = params.pop("bodypart2", None) + orientation = np.arctan2( + (pos_df[BP1]["y"] - pos_df[BP2]["y"]), + (pos_df[BP1]["x"] - pos_df[BP2]["x"]), + ) + return orientation + + +def no_orientation(pos_df: pd.DataFrame, **params): + fill_value = params.pop("fill_with", np.nan) + n_frames = len(pos_df) + orientation = np.full( + shape=(n_frames), fill_value=fill_value, dtype=np.float16 + ) + return orientation + + +def red_led_bisector_orientation(pos_df: pd.DataFrame, **params): + """Determines orientation based on 2 equally-spaced 
identifiers + + Identifiers are assumed to be perpendicular to the orientation direction. + A third object is needed to determine forward/backward + """ # timeit reported 3500x improvement for vectorized implementation + LED1 = params.pop("led1", None) + LED2 = params.pop("led2", None) + LED3 = params.pop("led3", None) + + x_vec = pos_df[[LED1, LED2]].diff(axis=1).iloc[:, 0] + y_vec = pos_df[[LED1, LED2]].diff(axis=1).iloc[:, 1] + + y_is_zero = y_vec.eq(0) + perp_direction = pos_df[[LED3]].diff(axis=1) + + # Handling the special case where y_vec is zero all Ys are the same + special_case = ( + y_is_zero + & (pos_df[LED3]["y"] == pos_df[LED1]["y"]) + & (pos_df[LED3]["y"] == pos_df[LED2]["y"]) + ) + if special_case.any(): + raise Exception("Cannot determine head direction from bisector") + + orientation = np.zeros(len(pos_df)) + orientation[y_is_zero & perp_direction.iloc[:, 0].gt(0)] = np.pi / 2 + orientation[y_is_zero & perp_direction.iloc[:, 0].lt(0)] = -np.pi / 2 + + orientation[~y_is_zero & ~x_vec.eq(0)] = np.arctan2( + y_vec[~y_is_zero], x_vec[~x_vec.eq(0)] + ) + + return orientation + + +# Add new functions for orientation calculation here + +_key_to_func_dict = { + "none": no_orientation, + "red_green_orientation": two_pt_head_orientation, + "red_led_bisector": red_led_bisector_orientation, +} + + def fill_nan(variable, video_time, variable_time): video_ind = np.digitize(variable_time, video_time[1:]) @@ -782,8 +763,9 @@ def fill_nan(variable, video_time, variable_time): return filled_variable -def convert_to_pixels(data, frame_size, cm_to_pixels=1.0): +def convert_to_pixels(data, frame_size=None, cm_to_pixels=1.0): """Converts from cm to pixels and flips the y-axis. + Parameters ---------- data : ndarray, shape (n_time, 2) @@ -797,503 +779,157 @@ def convert_to_pixels(data, frame_size, cm_to_pixels=1.0): return data / cm_to_pixels -def make_video( - video_filename, - video_frame_inds, - position_mean, - orientation_mean, - centroids, - likelihoods, - position_time, - video_time=None, - processor="opencv", - frames=None, - percent_frames=1, - output_video_filename="output.mp4", - cm_to_pixels=1.0, - disable_progressbar=False, - crop=None, - arrow_radius=15, - circle_radius=8, -): - import cv2 - - RGB_PINK = (234, 82, 111) - RGB_YELLOW = (253, 231, 76) - # RGB_WHITE = (255, 255, 255) - RGB_BLUE = (30, 144, 255) - RGB_ORANGE = (255, 127, 80) - # "#29ff3e", - # "#ff0073", - # "#ff291a", - # "#1e2cff", - # "#b045f3", - # "#ffe91a", - # ] - if processor == "opencv": - video = cv2.VideoCapture(video_filename) - fourcc = cv2.VideoWriter_fourcc(*"mp4v") - frame_size = (int(video.get(3)), int(video.get(4))) - frame_rate = video.get(5) - if frames is not None: - n_frames = len(frames) - else: - n_frames = int(len(video_frame_inds) * percent_frames) - frames = np.arange(0, n_frames) - print( - f"video save path: {output_video_filename}\n{n_frames} frames in total." 
- ) - if crop: - crop_offset_x = crop[0] - crop_offset_y = crop[2] - frame_size = (crop[1] - crop[0], crop[3] - crop[2]) - out = cv2.VideoWriter( - output_video_filename, fourcc, frame_rate, frame_size, True +class Centroid: + def __init__(self, pos_df, points, max_LED_separation=None): + if max_LED_separation is None and len(points) != 1: + raise ValueError("max_LED_separation must be provided") + if len(points) not in [1, 2, 4]: + raise ValueError("Invalid number of points") + + self.pos_df = pos_df + self.max_LED_separation = max_LED_separation + self.points_dict = points + self.point_names = list(points.values()) + self.idx = pd.IndexSlice + self.centroid = np.zeros(shape=(len(pos_df), 2)) + self.coords = { + p: pos_df.loc[:, self.idx[p, ("x", "y")]].to_numpy() + for p in self.point_names + } + self.nans = { + p: np.isnan(coord).any(axis=1) for p, coord in self.coords.items() + } + + if len(points) == 1: + self.get_1pt_centroid() + return + if len(points) in [2, 4]: # 4 also requires 2 + self.get_2pt_centroid() + if len(points) == 4: + self.get_4pt_centroid() + + def calc_centroid( + self, + mask: tuple, + points: list = None, + replace: bool = False, + midpoint: bool = False, + logical_or: bool = False, + ): + """Calculate the centroid of the points in the mask + + Parameters + ---------- + mask : Union[tuple, list] + Tuple of masks to apply to the points. Default is np.logical_and + over a tuple. If a list is passed, then np.logical_or is used. + List cannoot be used with logical_or=True + points : list, optional + List of points to calculate the centroid of. For replace, not needed + replace : bool, optional + Special case for replacing mask with nans, by default False + logical_or : bool, optional + Whether to use logical_and or logical_or to combine mask tuple. + """ + if isinstance(mask, list): + mask = [reduce(np.logical_and, m) for m in mask] + + if points is not None: # Check that combinations of points close enough + for pair in combinations(points, 2): + mask = (*mask, ~self.too_sep(pair[0], pair[1])) + + func = np.logical_or if logical_or else np.logical_and + mask = reduce(func, mask) + + if not np.any(mask): + return + if replace: + self.centroid[mask] = np.nan + return + if len(points) == 1: # only one point + self.centroid[mask] = self.coords[points[0]][mask] + return + elif len(points) == 3: + self.coords["midpoint"] = ( + self.coords[points[0]] + self.coords[points[1]] + ) / 2 + points = ["midpoint", points[2]] + coord_arrays = np.array([self.coords[point][mask] for point in points]) + self.centroid[mask] = np.nanmean(coord_arrays, axis=0) + + def too_sep(self, point1, point2): + """Check if points are too far apart""" + return ( + get_distance(self.coords[point1], self.coords[point2]) + >= self.max_LED_separation ) - print(f"video_output: {output_video_filename}") - - # centroids = { - # color: self.fill_nan(data, video_time, position_time) - # for color, data in centroids.items() - # } - if video_time: - position_mean = { - key: fill_nan( - position_mean[key]["position"], video_time, position_time - ) - for key in position_mean.keys() - } - orientation_mean = { - key: fill_nan( - position_mean[key]["orientation"], video_time, position_time - ) - for key in position_mean.keys() - # CBroz: Bug was here, using nonexistent orientation_mean dict - } - print( - f"frames start: {frames[0]}\nvideo_frames start: " - + f"{video_frame_inds[0]}\ncv2 frame ind start: {int(video.get(1))}" + + def get_1pt_centroid(self): + """Passthrough. 
If point is NaN, then centroid is NaN.""" + PT1 = self.points_dict.get("point1", None) + self.calc_centroid( + mask=(~self.nans[PT1],), + points=[PT1], ) - for time_ind in tqdm( - frames, desc="frames", disable=disable_progressbar - ): - if time_ind == 0: - video.set(1, time_ind + 1) - elif int(video.get(1)) != time_ind - 1: - video.set(1, time_ind - 1) - is_grabbed, frame = video.read() - - if is_grabbed: - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - if crop: - frame = frame[crop[2] : crop[3], crop[0] : crop[1]].copy() - if time_ind < video_frame_inds[0] - 1: - cv2.putText( - img=frame, - text=f"time_ind: {int(time_ind)} video frame: {int(video.get(1))}", - org=(10, 10), - fontFace=cv2.FONT_HERSHEY_SIMPLEX, - fontScale=0.5, - color=RGB_YELLOW, - thickness=1, - ) - frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) - out.write(frame) - continue - cv2.putText( - img=frame, - text=f"time_ind: {int(time_ind)} video frame: {int(video.get(1))}", - org=(10, 10), - fontFace=cv2.FONT_HERSHEY_SIMPLEX, - fontScale=0.5, - color=RGB_YELLOW, - thickness=1, - ) - pos_ind = time_ind - video_frame_inds[0] - # red_centroid = centroids["red"][time_ind] - # green_centroid = centroids["green"][time_ind] - for key in position_mean.keys(): - position = position_mean[key][pos_ind] - # if crop: - # position = np.hstack( - # ( - # convert_to_pixels( - # position[0, np.newaxis], - # frame_size, - # cm_to_pixels, - # ) - # - crop_offset_x, - # convert_to_pixels( - # position[1, np.newaxis], - # frame_size, - # cm_to_pixels, - # ) - # - crop_offset_y, - # ) - # ) - # else: - # position = convert_to_pixels(position, frame_size, cm_to_pixels) - position = convert_to_pixels( - position, frame_size, cm_to_pixels - ) - orientation = orientation_mean[key][pos_ind] - if key == "DLC": - color = RGB_BLUE - if key == "Trodes": - color = RGB_ORANGE - if key == "Common": - color = RGB_PINK - if np.all(~np.isnan(position)) & np.all( - ~np.isnan(orientation) - ): - arrow_tip = ( - int( - position[0] + arrow_radius * np.cos(orientation) - ), - int( - position[1] + arrow_radius * np.sin(orientation) - ), - ) - cv2.arrowedLine( - img=frame, - pt1=tuple(position.astype(int)), - pt2=arrow_tip, - color=color, - thickness=4, - line_type=8, - shift=cv2.CV_8U, - tipLength=0.25, - ) - - if np.all(~np.isnan(position)): - cv2.circle( - img=frame, - center=tuple(position.astype(int)), - radius=circle_radius, - color=color, - thickness=-1, - shift=cv2.CV_8U, - ) - # if np.all(~np.isnan(red_centroid)): - # cv2.circle( - # img=frame, - # center=tuple(red_centroid.astype(int)), - # radius=circle_radius, - # color=RGB_YELLOW, - # thickness=-1, - # shift=cv2.CV_8U, - # ) - - # if np.all(~np.isnan(green_centroid)): - # cv2.circle( - # img=frame, - # center=tuple(green_centroid.astype(int)), - # radius=circle_radius, - # color=RGB_PINK, - # thickness=-1, - # shift=cv2.CV_8U, - # ) - - # if np.all(~np.isnan(head_position)) & np.all( - # ~np.isnan(head_orientation) - # ): - # arrow_tip = ( - # int(head_position[0] + arrow_radius * np.cos(head_orientation)), - # int(head_position[1] + arrow_radius * np.sin(head_orientation)), - # ) - # cv2.arrowedLine( - # img=frame, - # pt1=tuple(head_position.astype(int)), - # pt2=arrow_tip, - # color=RGB_WHITE, - # thickness=4, - # line_type=8, - # shift=cv2.CV_8U, - # tipLength=0.25, - # ) - - # if np.all(~np.isnan(head_position)): - # cv2.circle( - # img=frame, - # center=tuple(head_position.astype(int)), - # radius=circle_radius, - # color=RGB_WHITE, - # thickness=-1, - # shift=cv2.CV_8U, - # ) - - frame = 
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) - out.write(frame) - else: - print("not grabbed") - break - print("releasing video") - video.release() - out.release() - print("destroying cv2 windows") - try: - cv2.destroyAllWindows() - except cv2.error: # if cv is already closed or does not have func - pass - print("finished making video with opencv") - return - elif processor == "matplotlib": - import matplotlib.animation as animation - import matplotlib.font_manager as fm - - position_mean = position_mean["DLC"] - orientation_mean = orientation_mean["DLC"] - video_slowdown = 1 - - # Set up formatting for the movie files - window_size = 501 - if likelihoods: - plot_likelihood = True - elif likelihoods is None: - plot_likelihood = False - - window_ind = np.arange(window_size) - window_size // 2 - # Get video frames - assert pathlib.Path( - video_filename - ).exists(), f"Path to video: {video_filename} does not exist" - color_swatch = [ - "#29ff3e", - "#ff0073", - "#ff291a", - "#1e2cff", - "#b045f3", - "#ffe91a", - ] - video = cv2.VideoCapture(video_filename) - fourcc = cv2.VideoWriter_fourcc(*"mp4v") - frame_size = (int(video.get(3)), int(video.get(4))) - frame_rate = video.get(5) - Writer = animation.writers["ffmpeg"] - if frames is not None: - n_frames = len(frames) - else: - n_frames = int(len(video_frame_inds) * percent_frames) - frames = np.arange(0, n_frames) - print( - f"video save path: {output_video_filename}\n{n_frames} frames in total." + def get_2pt_centroid(self): + self.calc_centroid( # Good points + points=self.point_names, + mask=(~self.nans[p] for p in self.point_names), ) - fps = int(np.round(frame_rate / video_slowdown)) - writer = Writer(fps=fps, bitrate=-1) - ret, frame = video.read() - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - if crop: - frame = frame[crop[2] : crop[3], crop[0] : crop[1]].copy() - crop_offset_x = crop[0] - crop_offset_y = crop[2] - frame_ind = 0 - with plt.style.context("dark_background"): - # Set up plots - fig, axes = plt.subplots( - 2, - 1, - figsize=(8, 6), - gridspec_kw={"height_ratios": [8, 1]}, - constrained_layout=False, + self.calc_centroid(mask=self.nans.values(), replace=True) # All bad + for point in self.point_names: # only one point + self.calc_centroid( + points=[point], + mask=( + ~self.nans[point], + *[self.nans[p] for p in self.point_names if p != point], + ), ) - axes[0].tick_params(colors="white", which="both") - axes[0].spines["bottom"].set_color("white") - axes[0].spines["left"].set_color("white") - image = axes[0].imshow(frame, animated=True) - print(f"frame after init plot: {video.get(1)}") - centroid_plot_objs = { - bodypart: axes[0].scatter( - [], - [], - s=2, - zorder=102, - color=color, - label=f"{bodypart} position", - animated=True, - alpha=0.6, - ) - for color, bodypart in zip(color_swatch, centroids.keys()) - } - centroid_position_dot = axes[0].scatter( - [], - [], - s=5, - zorder=102, - color="#b045f3", - label="centroid position", - animated=True, - alpha=0.6, - ) - (orientation_line,) = axes[0].plot( - [], - [], - color="cyan", - linewidth=1, - animated=True, - label="Orientation", - ) - axes[0].set_xlabel("") - axes[0].set_ylabel("") - ratio = frame_size[1] / frame_size[0] - if crop: - ratio = (crop[3] - crop[2]) / (crop[1] - crop[0]) - x_left, x_right = axes[0].get_xlim() - y_low, y_high = axes[0].get_ylim() - axes[0].set_aspect( - abs((x_right - x_left) / (y_low - y_high)) * ratio - ) - axes[0].spines["top"].set_color("black") - axes[0].spines["right"].set_color("black") - time_delta = pd.Timedelta( - 
position_time[0] - position_time[0] - ).total_seconds() - axes[0].legend(loc="lower right", fontsize=4) - title = axes[0].set_title( - f"time = {time_delta:3.4f}s\n frame = {frame_ind}", - fontsize=8, - ) - _ = fm.FontProperties(size=12) - axes[0].axis("off") - if plot_likelihood: - likelihood_objs = { - bodypart: axes[1].plot( - [], - [], - color=color, - linewidth=1, - animated=True, - clip_on=False, - label=bodypart, - )[0] - for color, bodypart in zip(color_swatch, likelihoods.keys()) - } - axes[1].set_ylim((0.0, 1)) - print(f"frame_rate: {frame_rate}") - axes[1].set_xlim( - ( - window_ind[0] / frame_rate, - window_ind[-1] / frame_rate, - ) - ) - axes[1].set_xlabel("Time [s]") - axes[1].set_ylabel("Likelihood") - axes[1].set_facecolor("black") - axes[1].spines["top"].set_color("black") - axes[1].spines["right"].set_color("black") - axes[1].legend(loc="upper right", fontsize=4) - progress_bar = tqdm(leave=True, position=0) - progress_bar.reset(total=n_frames) - - def _update_plot(time_ind): - if time_ind == 0: - video.set(1, time_ind + 1) - else: - video.set(1, time_ind - 1) - ret, frame = video.read() - if ret: - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - if crop: - frame = frame[ - crop[2] : crop[3], crop[0] : crop[1] - ].copy() - image.set_array(frame) - pos_ind = np.where(video_frame_inds == time_ind)[0] - if len(pos_ind) == 0: - centroid_position_dot.set_offsets((np.NaN, np.NaN)) - for bodypart in centroid_plot_objs.keys(): - centroid_plot_objs[bodypart].set_offsets( - (np.NaN, np.NaN) - ) - orientation_line.set_data((np.NaN, np.NaN)) - title.set_text(f"time = {0:3.4f}s\n frame = {time_ind}") - else: - pos_ind = pos_ind[0] - dlc_centroid_data = convert_to_pixels( - position_mean[pos_ind], frame, cm_to_pixels - ) - if crop: - dlc_centroid_data = np.hstack( - ( - convert_to_pixels( - position_mean[pos_ind, 0, np.newaxis], - frame, - cm_to_pixels, - ) - - crop_offset_x, - convert_to_pixels( - position_mean[pos_ind, 1, np.newaxis], - frame, - cm_to_pixels, - ) - - crop_offset_y, - ) - ) - for bodypart in centroid_plot_objs.keys(): - centroid_plot_objs[bodypart].set_offsets( - convert_to_pixels( - centroids[bodypart][pos_ind], - frame, - cm_to_pixels, - ) - ) - centroid_position_dot.set_offsets(dlc_centroid_data) - r = 30 - orientation_line.set_data( - [ - dlc_centroid_data[0], - dlc_centroid_data[0] - + r * np.cos(orientation_mean[pos_ind]), - ], - [ - dlc_centroid_data[1], - dlc_centroid_data[1] - + r * np.sin(orientation_mean[pos_ind]), - ], - ) - # Need to convert times to datetime object probably. 
- - time_delta = pd.Timedelta( - pd.to_datetime(position_time[pos_ind] * 1e9, unit="ns") - - pd.to_datetime(position_time[0] * 1e9, unit="ns") - ).total_seconds() - title.set_text( - f"time = {time_delta:3.4f}s\n frame = {time_ind}" - ) - likelihood_inds = pos_ind + window_ind - neg_inds = np.where(likelihood_inds < 0)[0] - over_inds = np.where( - likelihood_inds - > (len(likelihoods[list(likelihood_objs.keys())[0]])) - - 1 - )[0] - if len(neg_inds) > 0: - likelihood_inds[neg_inds] = 0 - if len(over_inds) > 0: - likelihood_inds[neg_inds] = -1 - for bodypart in likelihood_objs.keys(): - likelihood_objs[bodypart].set_data( - window_ind / frame_rate, - np.asarray(likelihoods[bodypart][likelihood_inds]), - ) - progress_bar.update() - - return ( - image, - centroid_position_dot, - orientation_line, - title, - ) - - movie = animation.FuncAnimation( - fig, - _update_plot, - frames=frames, - interval=1000 / fps, - blit=True, + def get_4pt_centroid(self): + green = self.points_dict.get("greenLED", None) + red_C = self.points_dict.get("redLED_C", None) + red_L = self.points_dict.get("redLED_L", None) + red_R = self.points_dict.get("redLED_R", None) + + self.calc_centroid( # Good green and center + points=[green, red_C], + mask=(~self.nans[green], ~self.nans[red_C]), + ) + + self.calc_centroid( # green, left/right - average left/right + points=[red_L, red_R, green], + mask=( + ~self.nans[green], + self.nans[red_C], + ~self.nans[red_L], + ~self.nans[red_R], + ), + ) + + self.calc_centroid( # only left/right + points=[red_L, red_R], + mask=( + self.nans[green], + self.nans[red_C], + ~self.nans[red_L], + ~self.nans[red_R], + ), + ) + + for side, other in [red_L, red_R], [red_R, red_L]: + self.calc_centroid( # green and one side are good, others are NaN + points=[side, green], + mask=( + ~self.nans[green], + self.nans[red_C], + ~self.nans[side], + self.nans[other], + ), ) - movie.save(output_video_filename, writer=writer, dpi=400) - video.release() - print("finished making video with matplotlib") - return + + self.calc_centroid( # green is NaN, red center is good + points=[red_C], + mask=(self.nans[green], ~self.nans[red_C]), + ) diff --git a/src/spyglass/position/v1/dlc_utils_makevid.py b/src/spyglass/position/v1/dlc_utils_makevid.py new file mode 100644 index 000000000..0a80254e1 --- /dev/null +++ b/src/spyglass/position/v1/dlc_utils_makevid.py @@ -0,0 +1,562 @@ +# Convenience functions +# some DLC-utils copied from datajoint element-interface utils.py +from pathlib import Path + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +from tqdm import tqdm as tqdm + +from spyglass.position.v1.dlc_utils import convert_to_pixels as _to_px +from spyglass.position.v1.dlc_utils import fill_nan +from spyglass.utils import logger + +RGB_PINK = (234, 82, 111) +RGB_YELLOW = (253, 231, 76) +RGB_BLUE = (30, 144, 255) +RGB_ORANGE = (255, 127, 80) +RGB_WHITE = (255, 255, 255) +COLOR_SWATCH = [ + "#29ff3e", + "#ff0073", + "#ff291a", + "#1e2cff", + "#b045f3", + "#ffe91a", +] + + +class VideoMaker: + def __init__( + self, + video_filename, + position_mean, + orientation_mean, + centroids, + position_time, + video_frame_inds=None, + likelihoods=None, + processor="opencv", # opencv, opencv-trodes, matplotlib + video_time=None, + frames=None, + percent_frames=1, + output_video_filename="output.mp4", + cm_to_pixels=1.0, + disable_progressbar=False, + crop=None, + arrow_radius=15, + circle_radius=8, + ): + self.video_filename = video_filename + self.video_frame_inds = video_frame_inds + 
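        # NOTE: the expected input structure depends on `processor` (see the
        # make_* methods below): "opencv" indexes position_mean and
        # orientation_mean by source (e.g. "DLC", "Trodes", "Common"),
        # "matplotlib" uses the "DLC" entry plus per-bodypart centroids and
        # likelihoods, and "opencv-trodes" expects plain arrays plus a
        # {"red": ..., "green": ...} centroids dict.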
self.position_mean = position_mean + self.orientation_mean = orientation_mean + self.centroids = centroids + self.likelihoods = likelihoods + self.position_time = position_time + self.processor = processor + self.video_time = video_time + self.frames = frames + self.percent_frames = percent_frames + self.output_video_filename = output_video_filename + self.cm_to_pixels = cm_to_pixels + self.disable_progressbar = disable_progressbar + self.crop = crop + self.arrow_radius = arrow_radius + self.circle_radius = circle_radius + + if not Path(self.video_filename).exists(): + raise FileNotFoundError(f"Video not found: {self.video_filename}") + + if frames is None: + self.n_frames = ( + int(self.orientation_mean.shape[0]) + if processor == "opencv-trodes" + else int(len(video_frame_inds) * percent_frames) + ) + self.frames = np.arange(0, self.n_frames) + else: + self.n_frames = len(frames) + + self.tqdm_kwargs = { + "iterable": ( + range(self.n_frames - 1) + if self.processor == "opencv-trodes" + else self.frames + ), + "desc": "frames", + "disable": self.disable_progressbar, + } + + # init for cv + self.video, self.frame_size = None, None + self.frame_rate, self.out = None, None + self.source_map = { + "DLC": RGB_BLUE, + "Trodes": RGB_ORANGE, + "Common": RGB_PINK, + } + + # intit for matplotlib + self.image, self.title, self.progress_bar = None, None, None + self.crop_offset_x = crop[0] if crop else 0 + self.crop_offset_y = crop[2] if crop else 0 + self.centroid_plot_objs, self.centroid_position_dot = None, None + self.orientation_line = None + self.likelihood_objs = None + self.window_ind = np.arange(501) - 501 // 2 + + self.make_video() + + def make_video(self): + if self.processor == "opencv": + self.make_video_opencv() + elif self.processor == "opencv-trodes": + self.make_trodes_video() + elif self.processor == "matplotlib": + self.make_video_matplotlib() + + def _init_video(self): + logger.info(f"Making video: {self.output_video_filename}") + self.video = cv2.VideoCapture(self.video_filename) + self.frame_size = ( + (int(self.video.get(3)), int(self.video.get(4))) + if not self.crop + else ( + self.crop[1] - self.crop[0], + self.crop[3] - self.crop[2], + ) + ) + self.frame_rate = self.video.get(5) + + def _init_cv_video(self): + _ = self._init_video() + self.out = cv2.VideoWriter( + filename=self.output_video_filename, + fourcc=cv2.VideoWriter_fourcc(*"mp4v"), + fps=self.frame_rate, + frameSize=self.frame_size, + isColor=True, + ) + frames_log = ( + f"\tFrames start: {self.frames[0]}\n" if np.any(self.frames) else "" + ) + inds_log = ( + f"\tVideo frame inds: {self.video_frame_inds[0]}\n" + if np.any(self.video_frame_inds) + else "" + ) + logger.info( + f"\n{frames_log}{inds_log}\tcv2 ind start: {int(self.video.get(1))}" + ) + + def _close_cv_video(self): + self.video.release() + self.out.release() + try: + cv2.destroyAllWindows() + except cv2.error: # if cv is already closed or does not have func + pass + logger.info(f"Finished video: {self.output_video_filename}") + + def _get_frame(self, frame, init_only=False, crop_order=(0, 1, 2, 3)): + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + if init_only or not self.crop: + return frame + x1, x2, y1, y2 = self.crop_order + return frame[ + self.crop[x1] : self.crop[x2], self.crop[y1] : self.crop[y2] + ].copy() + + def _video_set_by_ind(self, time_ind): + if time_ind == 0: + self.video.set(1, time_ind + 1) + elif int(self.video.get(1)) != time_ind - 1: + self.video.set(1, time_ind - 1) + + def _all_num(self, *args): + return 
all(np.all(~np.isnan(data)) for data in args) + + def _make_arrow( + self, + position, + orientation, + color, + img, + thickness=4, + line_type=8, + tipLength=0.25, + shift=cv2.CV_8U, + ): + if not self._all_num(position, orientation): + return + arrow_tip = ( + int(position[0] + self.arrow_radius * np.cos(orientation)), + int(position[1] + self.arrow_radius * np.sin(orientation)), + ) + cv2.arrowedLine( + img=img, + pt1=tuple(position.astype(int)), + pt2=arrow_tip, + color=color, + thickness=thickness, + line_type=line_type, + tipLength=tipLength, + shift=shift, + ) + + def _make_circle( + self, + data, + color, + img, + radius=None, + thickness=-1, + shift=cv2.CV_8U, + **kwargs, + ): + if not self._all_num(data): + return + cv2.circle( + img=img, + center=tuple(data.astype(int)), + radius=radius or self.circle_radius, + color=color, + thickness=thickness, + shift=shift, + ) + + def make_video_opencv(self): + _ = self._init_cv_video() + + if self.video_time: + self.position_mean = { + key: fill_nan( + self.position_mean[key]["position"], + self.video_time, + self.position_time, + ) + for key in self.position_mean.keys() + } + self.orientation_mean = { + key: fill_nan( + self.position_mean[key]["orientation"], + self.video_time, + self.position_time, + ) + for key in self.position_mean.keys() + } + + for time_ind in tqdm(**self.tqdm_kwargs): + _ = self._video_set_by_ind(time_ind) + + is_grabbed, frame = self.video.read() + + if not is_grabbed: + break + + frame = self._get_frame(frame) + + cv2.putText( + img=frame, + text=f"time_ind: {int(time_ind)} video frame: {int(self.video.get(1))}", + org=(10, 10), + fontFace=cv2.FONT_HERSHEY_SIMPLEX, + fontScale=0.5, + color=RGB_YELLOW, + thickness=1, + ) + + if time_ind < self.video_frame_inds[0] - 1: + self.out.write(self._get_frame(frame, init_only=True)) + continue + + pos_ind = time_ind - self.video_frame_inds[0] + + for key in self.position_mean: + position = _to_px( + data=self.position_mean[key][pos_ind], + cm_to_pixels=self.cm_to_pixels, + ) + orientation = self.orientation_mean[key][pos_ind] + cv_kwargs = { + "img": frame, + "color": self.source_map[key], + } + self._make_arrow(position, orientation, **cv_kwargs) + self._make_circle(data=position, **cv_kwargs) + + self._get_frame(frame, init_only=True) + self.out.write(frame) + self._close_cv_video() + return + + def make_trodes_video(self): + _ = self._init_cv_video() + + if np.any(self.video_time): + centroids = { + color: fill_nan( + variable=data, + video_time=self.video_time, + variable_time=self.position_time, + ) + for color, data in self.centroids.items() + } + position_mean = fill_nan( + self.position_mean, self.video_time, self.position_time + ) + orientation_mean = fill_nan( + self.orientation_mean, self.video_time, self.position_time + ) + + for time_ind in tqdm(**self.tqdm_kwargs): + is_grabbed, frame = self.video.read() + if not is_grabbed: + break + + frame = self._get_frame(frame) + + red_centroid = centroids["red"][time_ind] + green_centroid = centroids["green"][time_ind] + position = position_mean[time_ind] + position = _to_px(data=position, cm_to_pixels=self.cm_to_pixels) + orientation = orientation_mean[time_ind] + + self._make_circle(data=red_centroid, img=frame, color=RGB_YELLOW) + self._make_circle(data=green_centroid, img=frame, color=RGB_PINK) + self._make_arrow( + position=position, + orientation=orientation, + color=RGB_WHITE, + img=frame, + ) + self._make_circle(data=position, img=frame, color=RGB_WHITE) + self._get_frame(frame, init_only=True) + 
self.out.write(frame) + + self._close_cv_video() + + def make_video_matplotlib(self): + import matplotlib.animation as animation + + self.position_mean = self.position_mean["DLC"] + self.orientation_mean = self.orientation_mean["DLC"] + + _ = self._init_video() + + video_slowdown = 1 + fps = int(np.round(self.frame_rate / video_slowdown)) + Writer = animation.writers["ffmpeg"] + writer = Writer(fps=fps, bitrate=-1) + + ret, frame = self.video.read() + frame = self._get_frame(frame, crop_order=(2, 3, 0, 1)) + + frame_ind = 0 + plt.style.use("dark_background") + fig, axes = plt.subplots( + 2, + 1, + figsize=(8, 6), + gridspec_kw={"height_ratios": [8, 1]}, + constrained_layout=False, + ) + + axes[0].tick_params(colors="white", which="both") + axes[0].spines["bottom"].set_color("white") + axes[0].spines["left"].set_color("white") + self.image = axes[0].imshow(frame, animated=True) + + logger.info(f"frame after init plot: {self.video.get(1)}") + + self.centroid_plot_objs = { + bodypart: axes[0].scatter( + [], + [], + s=2, + zorder=102, + color=color, + label=f"{bodypart} position", + animated=True, + alpha=0.6, + ) + for color, bodypart in zip(COLOR_SWATCH, self.centroids.keys()) + } + self.centroid_position_dot = axes[0].scatter( + [], + [], + s=5, + zorder=102, + color="#b045f3", + label="centroid position", + animated=True, + alpha=0.6, + ) + (self.orientation_line,) = axes[0].plot( + [], + [], + color="cyan", + linewidth=1, + animated=True, + label="Orientation", + ) + + axes[0].set_xlabel("") + axes[0].set_ylabel("") + + ratio = ( + (self.crop[3] - self.crop[2]) / (self.crop[1] - self.crop[0]) + if self.crop + else self.frame_size[1] / self.frame_size[0] + ) + + x_left, x_right = axes[0].get_xlim() + y_low, y_high = axes[0].get_ylim() + + axes[0].set_aspect(abs((x_right - x_left) / (y_low - y_high)) * ratio) + axes[0].spines["top"].set_color("black") + axes[0].spines["right"].set_color("black") + + time_delta = pd.Timedelta( + self.position_time[0] - self.position_time[-1] + ).total_seconds() + + axes[0].legend(loc="lower right", fontsize=4) + self.title = axes[0].set_title( + f"time = {time_delta:3.4f}s\n frame = {frame_ind}", + fontsize=8, + ) + axes[0].axis("off") + + if self.likelihoods: + self.likelihood_objs = { + bodypart: axes[1].plot( + [], + [], + color=color, + linewidth=1, + animated=True, + clip_on=False, + label=bodypart, + )[0] + for color, bodypart in zip( + COLOR_SWATCH, self.likelihoods.keys() + ) + } + axes[1].set_ylim((0.0, 1)) + axes[1].set_xlim( + ( + self.window_ind[0] / self.frame_rate, + self.window_ind[-1] / self.frame_rate, + ) + ) + axes[1].set_xlabel("Time [s]") + axes[1].set_ylabel("Likelihood") + axes[1].set_facecolor("black") + axes[1].spines["top"].set_color("black") + axes[1].spines["right"].set_color("black") + axes[1].legend(loc="upper right", fontsize=4) + + self.progress_bar = tqdm(leave=True, position=0) + self.progress_bar.reset(total=self.n_frames) + + movie = animation.FuncAnimation( + fig, + self._update_plot, + frames=self.frames, + interval=1000 / fps, + blit=True, + ) + movie.save(self.output_video_filename, writer=writer, dpi=400) + self.video.release() + plt.style.use("default") + logger.info("finished making video with matplotlib") + return + + def _get_centroid_data(self, pos_ind): + def centroid_to_px(*idx): + return _to_px( + data=self.position_mean[idx], cm_to_pixels=self.cm_to_pixels + ) + + if not self.crop: + return centroid_to_px(pos_ind) + return np.hstack( + ( + centroid_to_px((pos_ind, 0, np.newaxis)) - self.crop_offset_x, + 
centroid_to_px((pos_ind, 1, np.newaxis)) - self.crop_offset_y, + ) + ) + + def _set_orient_line(self, frame, pos_ind): + def orient_list(c): + return [c, c + 30 * np.cos(self.orientation_mean[pos_ind])] + + if np.all(np.isnan(self.orientation_mean[pos_ind])): + self.orientation_line.set_data((np.NaN, np.NaN)) + else: + c0, c1 = self._get_centroid_data(pos_ind) + self.orientation_line.set_data(orient_list(c0), orient_list(c1)) + + def _update_plot(self, time_ind, *args): + _ = self._video_set_by_ind(time_ind) + + ret, frame = self.video.read() + if ret: + frame = self._get_frame(frame, crop_order=(2, 3, 0, 1)) + self.image.set_array(frame) + + pos_ind = np.where(self.video_frame_inds == time_ind)[0] + + if len(pos_ind) == 0: + self.centroid_position_dot.set_offsets((np.NaN, np.NaN)) + for bodypart in self.centroid_plot_objs.keys(): + self.centroid_plot_objs[bodypart].set_offsets((np.NaN, np.NaN)) + self.orientation_line.set_data((np.NaN, np.NaN)) + self.title.set_text(f"time = {0:3.4f}s\n frame = {time_ind}") + self.progress_bar.update() + return + + pos_ind = pos_ind[0] + likelihood_inds = pos_ind + self.window_ind + # initial implementation did not cover case of both neg and over < 0 + neg_inds = np.where(likelihood_inds < 0)[0] + likelihood_inds[neg_inds] = 0 if len(neg_inds) > 0 else -1 + + dlc_centroid_data = self._get_centroid_data(pos_ind) + + for bodypart in self.centroid_plot_objs: + self.centroid_plot_objs[bodypart].set_offsets( + _to_px( + data=self.centroids[bodypart][pos_ind], + cm_to_pixels=self.cm_to_pixels, + ) + ) + self.centroid_position_dot.set_offsets(dlc_centroid_data) + _ = self._set_orient_line(frame, pos_ind) + + time_delta = pd.Timedelta( + pd.to_datetime(self.position_time[pos_ind] * 1e9, unit="ns") + - pd.to_datetime(self.position_time[0] * 1e9, unit="ns") + ).total_seconds() + + self.title.set_text(f"time = {time_delta:3.4f}s\n frame = {time_ind}") + for bodypart in self.likelihood_objs.keys(): + self.likelihood_objs[bodypart].set_data( + self.window_ind / self.frame_rate, + np.asarray(self.likelihoods[bodypart][likelihood_inds]), + ) + self.progress_bar.update() + + return ( + self.image, + self.centroid_position_dot, + self.orientation_line, + self.title, + ) + + +def make_video(**kwargs): + VideoMaker(**kwargs) diff --git a/src/spyglass/position/v1/position_dlc_centroid.py b/src/spyglass/position/v1/position_dlc_centroid.py index 70a1c1252..8c8f43258 100644 --- a/src/spyglass/position/v1/position_dlc_centroid.py +++ b/src/spyglass/position/v1/position_dlc_centroid.py @@ -1,4 +1,4 @@ -from functools import reduce +from pathlib import Path import datajoint as dj import numpy as np @@ -9,8 +9,11 @@ from spyglass.common.common_behav import RawPosition from spyglass.common.common_nwbfile import AnalysisNwbfile from spyglass.position.v1.dlc_utils import ( + Centroid, _key_to_smooth_func_dict, + file_log, get_span_start_stop, + infer_output_dir, interp_pos, validate_list, validate_option, @@ -18,7 +21,7 @@ ) from spyglass.position.v1.position_dlc_cohort import DLCSmoothInterpCohort from spyglass.position.v1.position_dlc_position import DLCSmoothInterpParams -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("position_v1_dlc_centroid") @@ -29,8 +32,6 @@ class DLCCentroidParams(SpyglassMixin, dj.Manual): Parameters for calculating the centroid """ - # TODO: whether to keep all params in a params dict - # or break out into individual secondary keys definition = """ dlc_centroid_params_name: varchar(80) 
# name for this set of parameters --- @@ -113,7 +114,6 @@ class DLCCentroidSelection(SpyglassMixin, dj.Manual): definition = """ -> DLCSmoothInterpCohort -> DLCCentroidParams - --- """ @@ -130,201 +130,170 @@ class DLCCentroid(SpyglassMixin, dj.Computed): dlc_position_object_id : varchar(80) dlc_velocity_object_id : varchar(80) """ + log_path = None def make(self, key): - from .dlc_utils import OutputLogger, infer_output_dir + output_dir = infer_output_dir(key=key, makedir=False) + self.log_path = Path(output_dir, "log.log") + self._logged_make(key) + logger.info("inserted entry into DLCCentroid") + + def _fetch_pos_df(self, key, bodyparts_to_use): + return pd.concat( + { + bodypart: ( + DLCSmoothInterpCohort.BodyPart + & {**key, **{"bodypart": bodypart}} + ).fetch1_dataframe() + for bodypart in bodyparts_to_use + }, + axis=1, + ) + def _available_bodyparts(self, key): + return (DLCSmoothInterpCohort.BodyPart & key).fetch("bodypart") + + @file_log(logger) + def _logged_make(self, key): + METERS_PER_CM = 0.01 idx = pd.IndexSlice - output_dir = infer_output_dir(key=key, makedir=False) - with OutputLogger( - name=f"{key['nwb_file_name']}_{key['epoch']}_{key['dlc_model_name']}_log", - path=f"{output_dir.as_posix()}/log.log", - print_console=False, - ) as logger: - # Add to Analysis NWB file - analysis_file_name = AnalysisNwbfile().create( # logged - key["nwb_file_name"] - ) - logger.logger.info("-----------------------") - logger.logger.info("Centroid Calculation") - - # Get labels to smooth from Parameters table - cohort_entries = DLCSmoothInterpCohort.BodyPart & key - params = (DLCCentroidParams() & key).fetch1("params") - centroid_method = params.pop("centroid_method") - bodyparts_avail = cohort_entries.fetch("bodypart") - speed_smoothing_std_dev = params.pop("speed_smoothing_std_dev") - - if not centroid_method: - raise ValueError("Please specify a centroid method to use.") - validate_option(option=centroid_method, options=_key_to_func_dict) - - points = params.get("points") - required_points = _key_to_points.get(centroid_method) - validate_list( - required_items=required_points, - option_list=points, - name="params points", - condition=centroid_method, - ) - for point in required_points: - bodypart = points[point] - if bodypart not in bodyparts_avail: - raise ValueError( # TODO: migrate to input validation - "Bodypart in points not in model." 
- f"\tBodypart {bodypart}" - f"\tIn Model {bodyparts_avail}" - ) - bodyparts_to_use = [points[point] for point in required_points] - - pos_df = pd.concat( - { - bodypart: ( - DLCSmoothInterpCohort.BodyPart - & {**key, **{"bodypart": bodypart}} - ).fetch1_dataframe() - for bodypart in bodyparts_to_use - }, - axis=1, - ) - dt = np.median(np.diff(pos_df.index.to_numpy())) - sampling_rate = 1 / dt - logger.logger.info( - "Calculating centroid with %s", str(centroid_method) - ) - centroid_func = _key_to_func_dict.get(centroid_method) - centroid = centroid_func(pos_df, **params) - centroid_df = pd.DataFrame( - centroid, - columns=["x", "y"], - index=pos_df.index.to_numpy(), - ) - if params["interpolate"]: - if np.any(np.isnan(centroid)): - logger.logger.info("interpolating over NaNs") - nan_inds = ( - pd.isnull(centroid_df.loc[:, idx[("x", "y")]]) - .any(axis=1) - .to_numpy() - .nonzero()[0] - ) - nan_spans = get_span_start_stop(nan_inds) - interp_df = interp_pos( - centroid_df.copy(), nan_spans, **params["interp_params"] - ) - else: - logger.logger.info("no NaNs to interpolate over") - interp_df = centroid_df.copy() - else: - interp_df = centroid_df.copy() - if params["smooth"]: - smoothing_duration = params["smoothing_params"].get( - "smoothing_duration" + logger.info("Centroid Calculation") + + # Get labels to smooth from Parameters table + params = (DLCCentroidParams() & key).fetch1("params") + + points = params.get("points") + centroid_method = params.get("centroid_method") + required_points = _key_to_points.get(centroid_method) + for point in required_points: + if points[point] not in self._available_bodyparts(key): + raise ValueError( + "Bodypart in points not in model." + f"\tBodypart {points[point]}" + f"\tIn Model {self._available_bodyparts(key)}" ) - if not smoothing_duration: - # TODO: remove - validated with `validate_smooth_params` - raise KeyError( - "smoothing_duration needs to be passed within smoothing_params" - ) - dt = np.median(np.diff(pos_df.index.to_numpy())) - sampling_rate = 1 / dt - logger.logger.info("smoothing position") - smooth_func = _key_to_smooth_func_dict[ - params["smoothing_params"]["smooth_method"] - ] - logger.logger.info( - "Smoothing using method: %s", - str(params["smoothing_params"]["smooth_method"]), + bodyparts_to_use = [points[point] for point in required_points] + + pos_df = self._fetch_pos_df(key=key, bodyparts_to_use=bodyparts_to_use) + + logger.info("Calculating centroid") # now done using number of points + centroid = Centroid( + pos_df=pos_df, + points=params.get("points"), + max_LED_separation=params.get("max_LED_separation"), + ).centroid + centroid_df = pd.DataFrame( + centroid, + columns=["x", "y"], + index=pos_df.index.to_numpy(), + ) + + if params.get("interpolate"): + if np.any(np.isnan(centroid)): + logger.info("interpolating over NaNs") + nan_inds = ( + pd.isnull(centroid_df.loc[:, idx[("x", "y")]]) + .any(axis=1) + .to_numpy() + .nonzero()[0] ) - final_df = smooth_func( - interp_df, - sampling_rate=sampling_rate, - **params["smoothing_params"], + nan_spans = get_span_start_stop(nan_inds) + interp_df = interp_pos( + centroid_df.copy(), nan_spans, **params["interp_params"] ) else: - final_df = interp_df.copy() - logger.logger.info("getting velocity") - velocity = get_velocity( - final_df.loc[:, idx[("x", "y")]].to_numpy(), - time=pos_df.index.to_numpy(), - sigma=speed_smoothing_std_dev, - sampling_frequency=sampling_rate, - ) # cm/s - speed = np.sqrt(np.sum(velocity**2, axis=1)) # cm/s - # Create dataframe - velocity_df = pd.DataFrame( - 
np.concatenate((velocity, speed[:, np.newaxis]), axis=1), - columns=["velocity_x", "velocity_y", "speed"], - index=pos_df.index.to_numpy(), - ) - total_nan = np.sum( - final_df.loc[:, idx[("x", "y")]].isna().any(axis=1) - ) - pretrack_nan = np.sum( - final_df.iloc[:1000].loc[:, idx[("x", "y")]].isna().any(axis=1) - ) - logger.logger.info("total NaNs in centroid dataset: %d", total_nan) - logger.logger.info( - "NaNs in centroid dataset before ind 1000: %d", pretrack_nan - ) - position = pynwb.behavior.Position() - velocity = pynwb.behavior.BehavioralTimeSeries() - if query := (RawPosition & key): - spatial_series = query.fetch_nwb()[0]["raw_position"] - else: - spatial_series = None - - METERS_PER_CM = 0.01 - position.create_spatial_series( - name="position", - timestamps=final_df.index.to_numpy(), - conversion=METERS_PER_CM, - data=final_df.loc[:, idx[("x", "y")]].to_numpy(), - reference_frame=getattr(spatial_series, "reference_frame", ""), - comments=getattr(spatial_series, "comments", "no comments"), - description="x_position, y_position", - ) - velocity.create_timeseries( - name="velocity", - timestamps=velocity_df.index.to_numpy(), - conversion=METERS_PER_CM, - unit="m/s", - data=velocity_df.loc[ - :, idx[("velocity_x", "velocity_y", "speed")] - ].to_numpy(), - comments=getattr(spatial_series, "comments", "no comments"), - description="x_velocity, y_velocity, speed", - ) - velocity.create_timeseries( - name="video_frame_ind", - unit="index", - timestamps=final_df.index.to_numpy(), - data=pos_df[ - pos_df.columns.levels[0][0] - ].video_frame_ind.to_numpy(), - description="video_frame_ind", - comments="no comments", + interp_df = centroid_df.copy() + else: + interp_df = centroid_df.copy() + + sampling_rate = 1 / np.median(np.diff(pos_df.index.to_numpy())) + if params.get("smooth"): + smooth_params = params["smoothing_params"] + dt = np.median(np.diff(pos_df.index.to_numpy())) + sampling_rate = 1 / dt + smooth_func = _key_to_smooth_func_dict[ + smooth_params["smooth_method"] + ] + logger.info( + f"Smoothing using method: {smooth_func.__name__}", ) - nwb_analysis_file = AnalysisNwbfile() - key.update( - { - "analysis_file_name": analysis_file_name, - "dlc_position_object_id": nwb_analysis_file.add_nwb_object( - analysis_file_name, position - ), - "dlc_velocity_object_id": nwb_analysis_file.add_nwb_object( - analysis_file_name, velocity - ), - } + final_df = smooth_func( + interp_df, sampling_rate=sampling_rate, **smooth_params ) + else: + final_df = interp_df.copy() + + logger.info("getting velocity") + velocity = get_velocity( + final_df.loc[:, idx[("x", "y")]].to_numpy(), + time=pos_df.index.to_numpy(), + sigma=params.pop("speed_smoothing_std_dev"), + sampling_frequency=sampling_rate, + ) + speed = np.sqrt(np.sum(velocity**2, axis=1)) # cm/s + velocity_df = pd.DataFrame( + np.concatenate((velocity, speed[:, np.newaxis]), axis=1), + columns=["velocity_x", "velocity_y", "speed"], + index=pos_df.index.to_numpy(), + ) + total_nan = np.sum(final_df.loc[:, idx[("x", "y")]].isna().any(axis=1)) - nwb_analysis_file.add( - nwb_file_name=key["nwb_file_name"], - analysis_file_name=key["analysis_file_name"], - ) - self.insert1(key) - logger.logger.info("inserted entry into DLCCentroid") - AnalysisNwbfile().log(key, table=self.full_table_name) + logger.info(f"total NaNs in centroid dataset: {total_nan}") + spatial_series = (RawPosition() & key).fetch_nwb()[0]["raw_position"] + position = pynwb.behavior.Position() + velocity = pynwb.behavior.BehavioralTimeSeries() + + common_attrs = { + "conversion": 
METERS_PER_CM, + "comments": spatial_series.comments, + } + position.create_spatial_series( + name="position", + timestamps=final_df.index.to_numpy(), + data=final_df.loc[:, idx[("x", "y")]].to_numpy(), + reference_frame=spatial_series.reference_frame, + description="x_position, y_position", + **common_attrs, + ) + velocity.create_timeseries( + name="velocity", + timestamps=velocity_df.index.to_numpy(), + unit="m/s", + data=velocity_df.loc[ + :, idx[("velocity_x", "velocity_y", "speed")] + ].to_numpy(), + description="x_velocity, y_velocity, speed", + **common_attrs, + ) + velocity.create_timeseries( + name="video_frame_ind", + unit="index", + timestamps=final_df.index.to_numpy(), + data=pos_df[pos_df.columns.levels[0][0]].video_frame_ind.to_numpy(), + description="video_frame_ind", + comments="no comments", + ) + + # Add to Analysis NWB file + analysis_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) + nwb_analysis_file = AnalysisNwbfile() + nwb_analysis_file.add( + nwb_file_name=key["nwb_file_name"], + analysis_file_name=analysis_file_name, + ) + + self.insert1( + { + **key, + "analysis_file_name": analysis_file_name, + "dlc_position_object_id": nwb_analysis_file.add_nwb_object( + analysis_file_name, position + ), + "dlc_velocity_object_id": nwb_analysis_file.add_nwb_object( + analysis_file_name, velocity + ), + } + ) def fetch1_dataframe(self): nwb_data = self.fetch_nwb()[0] @@ -365,458 +334,6 @@ def fetch1_dataframe(self): ) -def four_led_centroid(pos_df: pd.DataFrame, **params): - """Determines the centroid of 4 LEDS on an implant LED ring. - Assumed to be the Green LED, and 3 red LEDs called: redLED_C, redLED_L, redLED_R - By default, uses (greenled + redLED_C) / 2 to calculate centroid - - If Green LED is NaN, but red center LED is not, - then the red center LED is called the centroid - If green and red center LEDs are NaN, but red left and red right LEDs are not, - then the centroid is (redLED_L + redLED_R) / 2 - If red center LED is NaN, but the other 3 LEDS are not, - then the centroid is (greenled + (redLED_L + redLED_R) / 2) / 2 - If red center and left LEDs are NaN, but green and red right LEDs are not, - then the centroid is (greenled + redLED_R) / 2 - If red center and right LEDs are NaN, but green and red left LEDs are not, - then the centroid is (greenled + redLED_L) / 2 - If all red LEDs are NaN, but green LED is not, - then the green LED is called the centroid - If all LEDs are NaN, then the centroid is NaN - - Parameters - ---------- - pos_df : pd.DataFrame - dataframe containing x and y position for each LED of interest, - index is timestamps. 
Column names specified by params - **params : dict - contains 'greenLED' and 'redLED_C', 'redLED_R', 'redLED_L' keys, - whose values specify the column names in `pos_df` - - Returns - ------- - centroid : np.ndarray - numpy array with shape (n_time, 2) - centroid[0] is the x coord and centroid[1] is the y coord - """ - if not (params.get("max_LED_separation") and params.get("points")): - raise KeyError("max_LED_separation/points need to be passed in params") - - centroid = np.zeros(shape=(len(pos_df), 2)) - idx = pd.IndexSlice - # TODO: this feels messy, clean-up - green_led = params["points"].pop("greenLED", None) - red_led_C = params["points"].pop("redLED_C", None) - red_led_L = params["points"].pop("redLED_L", None) - red_led_R = params["points"].pop("redLED_R", None) - green_nans = pos_df.loc[:, idx[green_led, ("x", "y")]].isna().any(axis=1) - red_C_nans = pos_df.loc[:, idx[red_led_C, ("x", "y")]].isna().any(axis=1) - red_L_nans = pos_df.loc[:, idx[red_led_L, ("x", "y")]].isna().any(axis=1) - red_R_nans = pos_df.loc[:, idx[red_led_R, ("x", "y")]].isna().any(axis=1) - # TODO: implement checks to make sure not rewriting previously set index in centroid - # If all given LEDs are not NaN - dist_between_green_red = get_distance( - pos_df.loc[:, idx[red_led_C, ("x", "y")]].to_numpy(), - pos_df.loc[:, idx[green_led, ("x", "y")]].to_numpy(), - ) - g_c_is_too_separated = ( - dist_between_green_red >= params["max_LED_separation"] - ) - all_good_mask = reduce( - np.logical_and, - ( - ~green_nans, - ~red_C_nans, - ~red_L_nans, - ~red_R_nans, - ~g_c_is_too_separated, - ), - ) - centroid[all_good_mask] = [ - *zip( - ( - pos_df.loc[idx[all_good_mask], idx[red_led_C, "x"]] - + pos_df.loc[idx[all_good_mask], idx[green_led, "x"]] - ) - / 2, - ( - pos_df.loc[idx[all_good_mask], idx[red_led_C, "y"]] - + pos_df.loc[idx[all_good_mask], idx[green_led, "y"]] - ) - / 2, - ) - ] - # If green LED and red center LED are both not NaN - green_red_C = np.logical_and( - ~green_nans, ~red_C_nans, ~g_c_is_too_separated - ) - if np.sum(green_red_C) > 0: - centroid[green_red_C] = [ - *zip( - ( - pos_df.loc[idx[green_red_C], idx[red_led_C, "x"]] - + pos_df.loc[idx[green_red_C], idx[green_led, "x"]] - ) - / 2, - ( - pos_df.loc[idx[green_red_C], idx[red_led_C, "y"]] - + pos_df.loc[idx[green_red_C], idx[green_led, "y"]] - ) - / 2, - ) - ] - # If all given LEDs are NaN - all_bad_mask = reduce( - np.logical_and, (green_nans, red_C_nans, red_L_nans, red_R_nans) - ) - centroid[all_bad_mask, :] = np.nan - # If green LED is NaN, but red center LED is not - no_green_red_C = np.logical_and(green_nans, ~red_C_nans) - if np.sum(no_green_red_C) > 0: - centroid[no_green_red_C] = [ - *zip( - pos_df.loc[idx[no_green_red_C], idx[red_led_C, "x"]], - pos_df.loc[idx[no_green_red_C], idx[red_led_C, "y"]], - ) - ] - # If green and red center LEDs are NaN, but red left and red right LEDs are not - dist_between_left_right = get_distance( - pos_df.loc[:, idx[red_led_L, ("x", "y")]].to_numpy(), - pos_df.loc[:, idx[red_led_R, ("x", "y")]].to_numpy(), - ) - l_r_is_too_separated = ( - dist_between_left_right >= params["max_LED_separation"] - ) - no_green_no_red_C_red_L_red_R = reduce( - np.logical_and, - ( - green_nans, - red_C_nans, - ~red_L_nans, - ~red_R_nans, - ~l_r_is_too_separated, - ), - ) - if np.sum(no_green_no_red_C_red_L_red_R) > 0: - centroid[no_green_no_red_C_red_L_red_R] = [ - *zip( - ( - pos_df.loc[ - idx[no_green_no_red_C_red_L_red_R], idx[red_led_L, "x"] - ] - + pos_df.loc[ - idx[no_green_no_red_C_red_L_red_R], idx[red_led_R, "x"] - ] 
- ) - / 2, - ( - pos_df.loc[ - idx[no_green_no_red_C_red_L_red_R], idx[red_led_L, "y"] - ] - + pos_df.loc[ - idx[no_green_no_red_C_red_L_red_R], idx[red_led_R, "y"] - ] - ) - / 2, - ) - ] - # If red center LED is NaN, but green, red left, and right LEDs are not - dist_between_left_green = get_distance( - pos_df.loc[:, idx[red_led_L, ("x", "y")]].to_numpy(), - pos_df.loc[:, idx[green_led, ("x", "y")]].to_numpy(), - ) - dist_between_right_green = get_distance( - pos_df.loc[:, idx[red_led_R, ("x", "y")]].to_numpy(), - pos_df.loc[:, idx[green_led, ("x", "y")]].to_numpy(), - ) - l_g_is_too_separated = ( - dist_between_left_green >= params["max_LED_separation"] - ) - r_g_is_too_separated = ( - dist_between_right_green >= params["max_LED_separation"] - ) - green_red_L_red_R_no_red_C = reduce( - np.logical_and, - ( - ~green_nans, - red_C_nans, - ~red_L_nans, - ~red_R_nans, - ~l_r_is_too_separated, - ~l_g_is_too_separated, - ~r_g_is_too_separated, - ), - ) - if np.sum(green_red_L_red_R_no_red_C) > 0: - midpoint = ( - ( - pos_df.loc[idx[green_red_L_red_R_no_red_C], idx[red_led_L, "x"]] - + pos_df.loc[ - idx[green_red_L_red_R_no_red_C], idx[red_led_R, "x"] - ] - ) - / 2, - ( - pos_df.loc[idx[green_red_L_red_R_no_red_C], idx[red_led_L, "y"]] - + pos_df.loc[ - idx[green_red_L_red_R_no_red_C], idx[red_led_R, "y"] - ] - ) - / 2, - ) - centroid[green_red_L_red_R_no_red_C] = [ - *zip( - ( - midpoint[0] - + pos_df.loc[ - idx[green_red_L_red_R_no_red_C], idx[green_led, "x"] - ] - ) - / 2, - ( - midpoint[1] - + pos_df.loc[ - idx[green_red_L_red_R_no_red_C], idx[green_led, "y"] - ] - ) - / 2, - ) - ] - # If red center and left LED is NaN, but green and red right LED are not - green_red_R_no_red_C_no_red_L = reduce( - np.logical_and, - ( - ~green_nans, - red_C_nans, - red_L_nans, - ~red_R_nans, - ~r_g_is_too_separated, - ), - ) - if np.sum(green_red_R_no_red_C_no_red_L) > 0: - centroid[green_red_R_no_red_C_no_red_L] = [ - *zip( - ( - pos_df.loc[ - idx[green_red_R_no_red_C_no_red_L], idx[red_led_R, "x"] - ] - + pos_df.loc[ - idx[green_red_R_no_red_C_no_red_L], idx[green_led, "x"] - ] - ) - / 2, - ( - pos_df.loc[ - idx[green_red_R_no_red_C_no_red_L], idx[red_led_R, "y"] - ] - + pos_df.loc[ - idx[green_red_R_no_red_C_no_red_L], idx[green_led, "y"] - ] - ) - / 2, - ) - ] - # If red center and right LED is NaN, but green and red left LED are not - green_red_L_no_red_C_no_red_R = reduce( - np.logical_and, - ( - ~green_nans, - red_C_nans, - ~red_L_nans, - red_R_nans, - ~l_g_is_too_separated, - ), - ) - if np.sum(green_red_L_no_red_C_no_red_R) > 0: - centroid[green_red_L_no_red_C_no_red_R] = [ - *zip( - ( - pos_df.loc[ - idx[green_red_L_no_red_C_no_red_R], idx[red_led_L, "x"] - ] - + pos_df.loc[ - idx[green_red_L_no_red_C_no_red_R], idx[green_led, "x"] - ] - ) - / 2, - ( - pos_df.loc[ - idx[green_red_L_no_red_C_no_red_R], idx[red_led_L, "y"] - ] - + pos_df.loc[ - idx[green_red_L_no_red_C_no_red_R], idx[green_led, "y"] - ] - ) - / 2, - ) - ] - # If all LEDS are NaN except red left LED - red_L_no_green_no_red_C_no_red_R = reduce( - np.logical_and, (green_nans, red_C_nans, ~red_L_nans, red_R_nans) - ) - if np.sum(red_L_no_green_no_red_C_no_red_R) > 0: - centroid[red_L_no_green_no_red_C_no_red_R] = [ - *zip( - pos_df.loc[ - idx[red_L_no_green_no_red_C_no_red_R], idx[red_led_L, "x"] - ], - pos_df.loc[ - idx[red_L_no_green_no_red_C_no_red_R], idx[red_led_L, "y"] - ], - ) - ] - # If all LEDS are NaN except red right LED - red_R_no_green_no_red_C_no_red_L = reduce( - np.logical_and, (green_nans, red_C_nans, red_L_nans, 
~red_R_nans) - ) - if np.sum(red_R_no_green_no_red_C_no_red_L) > 0: - centroid[red_R_no_green_no_red_C_no_red_L] = [ - *zip( - pos_df.loc[ - idx[red_R_no_green_no_red_C_no_red_L], idx[red_led_R, "x"] - ], - pos_df.loc[ - idx[red_R_no_green_no_red_C_no_red_L], idx[red_led_R, "y"] - ], - ) - ] - # If all red LEDs are NaN, but green LED is not - green_no_red = reduce( - np.logical_and, (~green_nans, red_C_nans, red_L_nans, red_R_nans) - ) - if np.sum(green_no_red) > 0: - centroid[green_no_red] = [ - *zip( - pos_df.loc[idx[green_no_red], idx[green_led, "x"]], - pos_df.loc[idx[green_no_red], idx[green_led, "y"]], - ) - ] - too_separated_inds = reduce( - np.logical_or, - ( - g_c_is_too_separated, - l_r_is_too_separated, - l_g_is_too_separated, - r_g_is_too_separated, - ), - ) - if np.sum(too_separated_inds) > 0: - centroid[too_separated_inds, :] = np.nan - return centroid - - -def two_pt_centroid(pos_df: pd.DataFrame, **params): - """ - Determines the centroid of 2 points using (point1 + point2) / 2 - For a given timestamp, if one point is NaN, - then the other point is assigned as the centroid. - If both are NaN, the centroid is NaN - - Parameters - ---------- - pos_df : pd.DataFrame - dataframe containing x and y position for each point of interest, - index is timestamps. Column names specified by params - **params : dict - contains 'point1' and 'point2' keys, - whose values specify the column names in `pos_df` - - Returns - ------- - centroid : np.ndarray - numpy array with shape (n_time, 2) - centroid[0] is the x coord and centroid[1] is the y coord - """ - if not (params.get("max_LED_separation") and params.get("points")): - raise KeyError("max_LED_separation/points need to be passed in params") - - idx = pd.IndexSlice - centroid = np.zeros(shape=(len(pos_df), 2)) - PT1 = params["points"].pop("point1", None) - PT2 = params["points"].pop("point2", None) - pt1_nans = pos_df.loc[:, idx[PT1, ("x", "y")]].isna().any(axis=1) - pt2_nans = pos_df.loc[:, idx[PT2, ("x", "y")]].isna().any(axis=1) - dist_between_points = get_distance( - pos_df.loc[:, idx[PT1, ("x", "y")]].to_numpy(), - pos_df.loc[:, idx[PT2, ("x", "y")]].to_numpy(), - ) - is_too_separated = dist_between_points >= params["max_LED_separation"] - all_good_mask = np.logical_and(~pt1_nans, ~pt2_nans, ~is_too_separated) - centroid[all_good_mask] = [ - *zip( - ( - pos_df.loc[idx[all_good_mask], idx[PT1, "x"]] - + pos_df.loc[idx[all_good_mask], idx[PT2, "x"]] - ) - / 2, - ( - pos_df.loc[idx[all_good_mask], idx[PT1, "y"]] - + pos_df.loc[idx[all_good_mask], idx[PT2, "y"]] - ) - / 2, - ) - ] - # If only point1 is good - pt1_mask = np.logical_and(~pt1_nans, pt2_nans) - if np.sum(pt1_mask) > 0: - centroid[pt1_mask] = [ - *zip( - pos_df.loc[idx[pt1_mask], idx[PT1, "x"]], - pos_df.loc[idx[pt1_mask], idx[PT1, "y"]], - ) - ] - # If only point2 is good - pt2_mask = np.logical_and(pt1_nans, ~pt2_nans) - if np.sum(pt2_mask) > 0: - centroid[pt2_mask] = [ - *zip( - pos_df.loc[idx[pt2_mask], idx[PT2, "x"]], - pos_df.loc[idx[pt2_mask], idx[PT2, "y"]], - ) - ] - # If neither point is not NaN - all_bad_mask = np.logical_and(pt1_nans, pt2_nans) - centroid[all_bad_mask, :] = np.nan - # If LEDs are too far apart - centroid[is_too_separated, :] = np.nan - - return centroid - - -def one_pt_centroid(pos_df: pd.DataFrame, **params): - """ - Passes through the provided point as the centroid - For a given timestamp, if the point is NaN, - then the centroid is NaN - - Parameters - ---------- - pos_df : pd.DataFrame - dataframe containing x and y position for the point of 
interest, - index is timestamps. Column name specified by params - **params : dict - contains a 'point1' key, - whose value specifies the column name in `pos_df` - - Returns - ------- - centroid : np.ndarray - numpy array with shape (n_time, 2) - centroid[0] is the x coord and centroid[1] is the y coord - """ - if not params.get("points"): - raise KeyError("points need to be passed in params") - idx = pd.IndexSlice - PT1 = params["points"].pop("point1", None) - centroid = pos_df.loc[:, idx[PT1, ("x", "y")]].to_numpy() - return centroid - - -_key_to_func_dict = { - "four_led_centroid": four_led_centroid, - "two_pt_centroid": two_pt_centroid, - "one_pt_centroid": one_pt_centroid, -} _key_to_points = { "four_led_centroid": ["greenLED", "redLED_L", "redLED_C", "redLED_R"], "two_pt_centroid": ["point1", "point2"], diff --git a/src/spyglass/position/v1/position_dlc_cohort.py b/src/spyglass/position/v1/position_dlc_cohort.py index 6cf1f0eee..4883fc335 100644 --- a/src/spyglass/position/v1/position_dlc_cohort.py +++ b/src/spyglass/position/v1/position_dlc_cohort.py @@ -1,13 +1,16 @@ +from pathlib import Path + import datajoint as dj import numpy as np import pandas as pd from spyglass.common.common_nwbfile import AnalysisNwbfile +from spyglass.position.v1.dlc_utils import file_log, infer_output_dir from spyglass.position.v1.position_dlc_pose_estimation import ( # noqa: F401 DLCPoseEstimation, ) from spyglass.position.v1.position_dlc_position import DLCSmoothInterp -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("position_v1_dlc_cohort") @@ -39,6 +42,7 @@ class DLCSmoothInterpCohort(SpyglassMixin, dj.Computed): -> DLCSmoothInterpCohortSelection --- """ + log_path = None class BodyPart(SpyglassMixin, dj.Part): definition = """ @@ -87,46 +91,40 @@ def fetch1_dataframe(self): ) def make(self, key): - from .dlc_utils import OutputLogger, infer_output_dir - output_dir = infer_output_dir(key=key, makedir=False) - with OutputLogger( - name=f"{key['nwb_file_name']}_{key['epoch']}_{key['dlc_model_name']}_log", - path=f"{output_dir.as_posix()}/log.log", - print_console=False, - ) as logger: - logger.logger.info("-----------------------") - logger.logger.info("Bodypart Cohort") - # from Jen Guidera - self.insert1(key) - cohort_selection = (DLCSmoothInterpCohortSelection & key).fetch1() - table_entries = [] - bodyparts_params_dict = cohort_selection.pop( - "bodyparts_params_dict" + self.log_path = Path(output_dir) / "log.log" + self._logged_make(key) + logger.info("Inserted entry into DLCSmoothInterpCohort") + + @file_log(logger, console=False) + def _logged_make(self, key): + logger.info("-----------------------") + logger.info("Bodypart Cohort") + + cohort_selection = (DLCSmoothInterpCohortSelection & key).fetch1() + table_entries = [] + bp_params_dict = cohort_selection.pop("bodyparts_params_dict") + temp_key = cohort_selection.copy() + for bodypart, params in bp_params_dict.items(): + temp_key["bodypart"] = bodypart + temp_key["dlc_si_params_name"] = params + table_entries.append((DLCSmoothInterp & temp_key).fetch()) + + if not len(table_entries) == len(bp_params_dict): + raise ValueError( + f"Mismatch: DLCSmoothInterp {len(table_entries)} vs " + + f"bodyparts_params_dict {len(bp_params_dict)}" ) - temp_key = cohort_selection.copy() - for bodypart, params in bodyparts_params_dict.items(): - temp_key["bodypart"] = bodypart - temp_key["dlc_si_params_name"] = params - table_entries.append((DLCSmoothInterp & temp_key).fetch()) - assert 
len(table_entries) == len( - bodyparts_params_dict - ), "more entries found in DLCSmoothInterp than specified in bodyparts_params_dict" - table_column_names = list(table_entries[0].dtype.fields.keys()) - - if len(table_entries) == 0: - raise ValueError( - f"No entries found in DLCSmoothInterp for {temp_key}" - ) - - for table_entry in table_entries: - entry_key = { - **{ - k: v for k, v in zip(table_column_names, table_entry[0]) - }, - **key, - } - DLCSmoothInterpCohort.BodyPart.insert1( - entry_key, skip_duplicates=True - ) - logger.logger.info("Inserted entry into DLCSmoothInterpCohort") + + table_column_names = list(table_entries[0].dtype.fields.keys()) + + part_keys = [ + { + **{k: v for k, v in zip(table_column_names, table_entry[0])}, + **key, + } + for table_entry in table_entries + ] + + self.insert1(key) + self.BodyPart.insert(part_keys, skip_duplicates=True) diff --git a/src/spyglass/position/v1/position_dlc_model.py b/src/spyglass/position/v1/position_dlc_model.py index 5b1cf265b..979e4ddf1 100644 --- a/src/spyglass/position/v1/position_dlc_model.py +++ b/src/spyglass/position/v1/position_dlc_model.py @@ -1,14 +1,12 @@ -import glob import os -from pathlib import Path, PosixPath, PurePath +from pathlib import Path import datajoint as dj import ruamel.yaml as yaml -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger from . import dlc_reader -from .dlc_decorators import accepts from .position_dlc_project import BodyPart, DLCProject # noqa: F401 from .position_dlc_training import DLCModelTraining # noqa: F401 @@ -31,10 +29,11 @@ class DLCModelInput(SpyglassMixin, dj.Manual): def insert1(self, key, **kwargs): # expects key from DLCProject with config_path project_path = Path(key["config_path"]).parent - assert project_path.exists(), "project path does not exist" + if not project_path.exists(): + raise FileNotFoundError(f"path does not exist: {project_path}") key["dlc_model_name"] = f'{project_path.name.split("model")[0]}model' key["project_path"] = project_path.as_posix() - del key["config_path"] + _ = key.pop("config_path") super().insert1(key, **kwargs) DLCModelSource.insert_entry( dlc_model_name=key["dlc_model_name"], @@ -75,7 +74,6 @@ class FromUpstream(SpyglassMixin, dj.Part): """ @classmethod - @accepts(None, None, ("FromUpstream", "FromImport"), None) def insert_entry( cls, dlc_model_name: str, @@ -144,7 +142,6 @@ class DLCModelSelection(SpyglassMixin, dj.Manual): definition = """ -> DLCModelSource -> DLCModelParams - --- """ @@ -178,34 +175,41 @@ def make(self, key): from deeplabcut.utils.auxiliaryfunctions import GetScorerName _, model_name, table_source = (DLCModelSource & key).fetch1().values() + SourceTable = getattr(DLCModelSource, table_source) params = (DLCModelParams & key).fetch1("params") - project_path = (SourceTable & key).fetch1("project_path") - if not isinstance(project_path, PosixPath): - project_path = Path(project_path) - config_query = PurePath(project_path, Path("*config.y*ml")) - available_config = glob.glob(config_query.as_posix()) - dj_config = [path for path in available_config if "dj_dlc" in path] - if len(dj_config) > 0: - config_path = Path(dj_config[0]) - elif len(available_config) == 1: - config_path = Path(available_config[0]) - else: - config_path = PurePath(project_path, Path("config.yaml")) + project_path = Path((SourceTable & key).fetch1("project_path")) + + available_config = list(project_path.glob("*config.y*ml")) + dj_config = [path for path in available_config if "dj_dlc" in str(path)] + 
config_path = ( + Path(dj_config[0]) + if len(dj_config) > 0 + else ( + Path(available_config[0]) + if len(available_config) == 1 + else project_path / "config.yaml" + ) + ) + if not config_path.exists(): - raise OSError(f"config_path {config_path} does not exist.") + raise FileNotFoundError(f"config does not exist: {config_path}") + if config_path.suffix in (".yml", ".yaml"): with open(config_path, "rb") as f: safe_yaml = yaml.YAML(typ="safe", pure=True) dlc_config = safe_yaml.load(f) - if isinstance(params["params"], dict): + if isinstance(params.get("params"), dict): dlc_config.update(params["params"]) del params["params"] + # TODO: clean-up. this feels sloppy shuffle = params.pop("shuffle", 1) trainingsetindex = params.pop("trainingsetindex", None) + if not isinstance(trainingsetindex, int): raise KeyError("no trainingsetindex specified in key") + model_prefix = params.pop("model_prefix", "") model_description = params.pop("model_description", model_name) _ = params.pop("dlc_training_params_name", None) @@ -217,10 +221,10 @@ def make(self, key): "snapshotindex", "TrainingFraction", ] - for attribute in needed_attributes: - assert ( - attribute in dlc_config - ), f"Couldn't find {attribute} in config" + if not set(needed_attributes).issubset(set(dlc_config)): + raise KeyError( + f"Missing required config attributes: {needed_attributes}" + ) scorer_legacy = str_to_bool(dlc_config.get("scorer_legacy", "f")) @@ -252,12 +256,12 @@ def make(self, key): # ---- Save DJ-managed config ---- _ = dlc_reader.save_yaml(project_path, dlc_config) - # ____ Insert into table ---- + # --- Insert into table ---- self.insert1(key) self.BodyPart.insert( {**part_key, "bodypart": bp} for bp in dlc_config["bodyparts"] ) - print( + logger.info( f"Finished inserting {model_name}, training iteration" f" {dlc_config['iteration']} into DLCModel" ) diff --git a/src/spyglass/position/v1/position_dlc_orient.py b/src/spyglass/position/v1/position_dlc_orient.py index f64802a59..d87ecf8bc 100644 --- a/src/spyglass/position/v1/position_dlc_orient.py +++ b/src/spyglass/position/v1/position_dlc_orient.py @@ -8,13 +8,26 @@ from spyglass.common.common_behav import RawPosition from spyglass.common.common_nwbfile import AnalysisNwbfile -from spyglass.position.v1.dlc_utils import get_span_start_stop -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.position.v1.dlc_utils import ( + get_span_start_stop, + interp_orientation, + no_orientation, + red_led_bisector_orientation, + two_pt_head_orientation, +) +from spyglass.utils import SpyglassMixin, logger from .position_dlc_cohort import DLCSmoothInterpCohort schema = dj.schema("position_v1_dlc_orient") +# Add new functions for orientation calculation here +_key_to_func_dict = { + "none": no_orientation, + "red_green_orientation": two_pt_head_orientation, + "red_led_bisector": red_led_bisector_orientation, +} + @schema class DLCOrientationParams(SpyglassMixin, dj.Manual): @@ -63,8 +76,6 @@ def get_default(cls): @schema class DLCOrientationSelection(SpyglassMixin, dj.Manual): - """ """ - definition = """ -> DLCSmoothInterpCohort -> DLCOrientationParams @@ -85,9 +96,7 @@ class DLCOrientation(SpyglassMixin, dj.Computed): dlc_orientation_object_id : varchar(80) """ - def make(self, key): - # Get labels to smooth from Parameters table - AnalysisNwbfile()._creation_times["pre_create_time"] = time() + def _get_pos_df(self, key): cohort_entries = DLCSmoothInterpCohort.BodyPart & key pos_df = pd.concat( { @@ -99,14 +108,21 @@ def make(self, key): }, axis=1, ) + return pos_df + 
+ def make(self, key): + # Get labels to smooth from Parameters table + AnalysisNwbfile()._creation_times["pre_create_time"] = time() + pos_df = self._get_pos_df(key) + params = (DLCOrientationParams() & key).fetch1("params") orientation_smoothing_std_dev = params.pop( "orientation_smoothing_std_dev", None ) - dt = np.median(np.diff(pos_df.index.to_numpy())) - sampling_rate = 1 / dt + sampling_rate = 1 / np.median(np.diff(pos_df.index.to_numpy())) orient_func = _key_to_func_dict[params["orient_method"]] orientation = orient_func(pos_df, **params) + if not params["orient_method"] == "none": # Smooth orientation is_nan = np.isnan(orientation) @@ -130,6 +146,7 @@ def make(self, key): ) # convert back to between -pi and pi orientation = np.angle(np.exp(1j * orientation)) + final_df = pd.DataFrame( orientation, columns=["orientation"], index=pos_df.index ) @@ -141,6 +158,7 @@ def make(self, key): spatial_series = query.fetch_nwb()[0]["raw_position"] else: spatial_series = None + orientation = pynwb.behavior.CompassDirection() orientation.create_spatial_series( name="orientation", @@ -172,9 +190,7 @@ def fetch1_dataframe(self): ), name="time", ) - COLUMNS = [ - "orientation", - ] + COLUMNS = ["orientation"] return pd.DataFrame( np.asarray(nwb_data["dlc_orientation"].get_spatial_series().data)[ :, np.newaxis @@ -182,97 +198,3 @@ def fetch1_dataframe(self): columns=COLUMNS, index=index, ) - - -def two_pt_head_orientation(pos_df: pd.DataFrame, **params): - """Determines orientation based on vector between two points""" - BP1 = params.pop("bodypart1", None) - BP2 = params.pop("bodypart2", None) - orientation = np.arctan2( - (pos_df[BP1]["y"] - pos_df[BP2]["y"]), - (pos_df[BP1]["x"] - pos_df[BP2]["x"]), - ) - return orientation - - -def no_orientation(pos_df: pd.DataFrame, **params): - fill_value = params.pop("fill_with", np.nan) - n_frames = len(pos_df) - orientation = np.full( - shape=(n_frames), fill_value=fill_value, dtype=np.float16 - ) - return orientation - - -def red_led_bisector_orientation(pos_df: pd.DataFrame, **params): - """Determines orientation based on 2 equally-spaced identifiers - that are assumed to be perpendicular to the orientation direction. 
- A third object is needed to determine forward/backward - """ - LED1 = params.pop("led1", None) - LED2 = params.pop("led2", None) - LED3 = params.pop("led3", None) - orientation = [] - for index, row in pos_df.iterrows(): - x_vec = row[LED1]["x"] - row[LED2]["x"] - y_vec = row[LED1]["y"] - row[LED2]["y"] - if y_vec == 0: - if (row[LED3]["y"] > row[LED1]["y"]) & ( - row[LED3]["y"] > row[LED2]["y"] - ): - orientation.append(np.pi / 2) - elif (row[LED3]["y"] < row[LED1]["y"]) & ( - row[LED3]["y"] < row[LED2]["y"] - ): - orientation.append(-(np.pi / 2)) - else: - raise Exception("Cannot determine head direction from bisector") - else: - length = np.sqrt(y_vec * y_vec + x_vec * x_vec) - norm = np.array([-y_vec / length, x_vec / length]) - orientation.append(np.arctan2(norm[1], norm[0])) - if index + 1 == len(pos_df): - break - return np.array(orientation) - - -# Add new functions for orientation calculation here - -_key_to_func_dict = { - "none": no_orientation, - "red_green_orientation": two_pt_head_orientation, - "red_led_bisector": red_led_bisector_orientation, -} - - -def interp_orientation(orientation, spans_to_interp, **kwargs): - idx = pd.IndexSlice - # TODO: add parameters to refine interpolation - for ind, (span_start, span_stop) in enumerate(spans_to_interp): - if (span_stop + 1) >= len(orientation): - orientation.loc[idx[span_start:span_stop], idx["orientation"]] = ( - np.nan - ) - print(f"ind: {ind} has no endpoint with which to interpolate") - continue - if span_start < 1: - orientation.loc[idx[span_start:span_stop], idx["orientation"]] = ( - np.nan - ) - print(f"ind: {ind} has no startpoint with which to interpolate") - continue - orient = [ - orientation["orientation"].iloc[span_start - 1], - orientation["orientation"].iloc[span_stop + 1], - ] - start_time = orientation.index[span_start] - stop_time = orientation.index[span_stop] - orientnew = np.interp( - x=orientation.index[span_start : span_stop + 1], - xp=[start_time, stop_time], - fp=[orient[0], orient[-1]], - ) - orientation.loc[idx[start_time:stop_time], idx["orientation"]] = ( - orientnew - ) - return orientation diff --git a/src/spyglass/position/v1/position_dlc_pose_estimation.py b/src/spyglass/position/v1/position_dlc_pose_estimation.py index 6ae7669bf..4b82918a0 100644 --- a/src/spyglass/position/v1/position_dlc_pose_estimation.py +++ b/src/spyglass/position/v1/position_dlc_pose_estimation.py @@ -1,5 +1,5 @@ -import os from datetime import datetime +from pathlib import Path import datajoint as dj import matplotlib.pyplot as plt @@ -14,10 +14,16 @@ convert_epoch_interval_name_to_position_interval_name, ) from spyglass.common.common_nwbfile import AnalysisNwbfile -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.position.v1.dlc_utils import ( + file_log, + find_mp4, + get_video_info, + infer_output_dir, +) +from spyglass.position.v1.position_dlc_model import DLCModel +from spyglass.utils import SpyglassMixin, logger -from .dlc_utils import OutputLogger, infer_output_dir -from .position_dlc_model import DLCModel +from . 
import dlc_reader schema = dj.schema("position_v1_dlc_pose_estimation") @@ -33,6 +39,7 @@ class DLCPoseEstimationSelection(SpyglassMixin, dj.Manual): pose_estimation_output_dir='': varchar(255) # output dir relative to the root dir pose_estimation_params=null : longblob # analyze_videos params, if not default """ + log_path = None @classmethod def get_video_crop(cls, video_path, crop_input=None): @@ -48,6 +55,8 @@ def get_video_crop(cls, video_path, crop_input=None): ------- crop_ints : list list of 4 integers [x min, x max, y min, y max] + crop_input : str, optional + input string to determine cropping parameters. If None, user is queried """ import cv2 @@ -75,9 +84,8 @@ def get_video_crop(cls, video_path, crop_input=None): assert all(isinstance(val, int) for val in crop_ints) return crop_ints - @classmethod def insert_estimation_task( - cls, + self, key, task_mode="trigger", # load or trigger params: dict = None, @@ -98,40 +106,40 @@ def insert_estimation_task( videotype, gputouse, save_as_csv, batchsize, cropping, TFGPUinference, dynamic, robust_nframes, allow_growth, use_shelve """ - from .dlc_utils import check_videofile, get_video_path - - video_path, video_filename, _, _ = get_video_path(key) output_dir = infer_output_dir(key) + self.log_path = Path(output_dir) / "log.log" + self._insert_est_with_log( + key, task_mode, params, check_crop, skip_duplicates, output_dir + ) + logger.info("inserted entry into Pose Estimation Selection") + return {**key, "task_mode": task_mode} - if not video_path: - raise FileNotFoundError(f"Video file not found for {key}") + @file_log(logger, console=False) + def _insert_est_with_log( + self, key, task_mode, params, check_crop, skip_duplicates, output_dir + ): - with OutputLogger( - name=f"{key['nwb_file_name']}_{key['epoch']}_{key['dlc_model_name']}_log", - path=f"{output_dir.as_posix()}/log.log", - ) as logger: - logger.logger.info("Pose Estimation Selection") - video_dir = os.path.dirname(video_path) + "/" - logger.logger.info("video_dir: %s", video_dir) - video_path = check_videofile( - video_path=video_dir, video_filename=video_filename - )[0] - if check_crop is not None: - params["cropping"] = cls.get_video_crop( - video_path=video_path.as_posix() - ) - cls.insert1( - { - **key, - "task_mode": task_mode, - "pose_estimation_params": params, - "video_path": video_path, - "pose_estimation_output_dir": output_dir, - }, - skip_duplicates=skip_duplicates, + v_path, v_fname, _, _ = get_video_info(key) + if not v_path: + raise FileNotFoundError(f"Video file not found for {key}") + logger.info("Pose Estimation Selection") + v_dir = Path(v_path).parent + logger.info("video_dir: %s", v_dir) + v_path = find_mp4(video_path=v_dir, video_filename=v_fname) + if check_crop: + params["cropping"] = self.get_video_crop( + video_path=v_path.as_posix() ) - logger.logger.info("inserted entry into Pose Estimation Selection") - return {**key, "task_mode": task_mode} + self.insert1( + { + **key, + "task_mode": task_mode, + "pose_estimation_params": params, + "video_path": v_path, + "pose_estimation_output_dir": output_dir, + }, + skip_duplicates=skip_duplicates, + ) @schema @@ -154,6 +162,7 @@ class BodyPart(SpyglassMixin, dj.Part): """ _nwb_table = AnalysisNwbfile + log_path = None def fetch1_dataframe(self): nwb_data = self.fetch_nwb()[0] @@ -199,152 +208,147 @@ def fetch1_dataframe(self): def make(self, key): """.populate() method will launch training for each PoseEstimationTask""" - from . 
import dlc_reader - from .dlc_utils import get_video_path + self.log_path = ( + Path(infer_output_dir(key=key, makedir=False)) / "log.log" + ) + self._logged_make(key) + + @file_log(logger, console=True) + def _logged_make(self, key): METERS_PER_CM = 0.01 - output_dir = infer_output_dir(key=key, makedir=False) - with OutputLogger( - name=f"{key['nwb_file_name']}_{key['epoch']}_{key['dlc_model_name']}_log", - path=f"{output_dir.as_posix()}/log.log", - ) as logger: - logger.logger.info("----------------------") - logger.logger.info("Pose Estimation") - # ID model and directories - dlc_model = (DLCModel & key).fetch1() - bodyparts = (DLCModel.BodyPart & key).fetch("bodypart") - task_mode, analyze_video_params, video_path, output_dir = ( - DLCPoseEstimationSelection & key - ).fetch1( - "task_mode", - "pose_estimation_params", - "video_path", - "pose_estimation_output_dir", + logger.info("----------------------") + logger.info("Pose Estimation") + # ID model and directories + dlc_model = (DLCModel & key).fetch1() + bodyparts = (DLCModel.BodyPart & key).fetch("bodypart") + task_mode, analyze_video_params, video_path, output_dir = ( + DLCPoseEstimationSelection & key + ).fetch1( + "task_mode", + "pose_estimation_params", + "video_path", + "pose_estimation_output_dir", + ) + analyze_video_params = analyze_video_params or {} + + project_path = dlc_model["project_path"] + + # Trigger PoseEstimation + if task_mode == "trigger": + dlc_reader.do_pose_estimation( + video_path, + dlc_model, + project_path, + output_dir, + **analyze_video_params, ) - analyze_video_params = analyze_video_params or {} - - project_path = dlc_model["project_path"] - - # Trigger PoseEstimation - if task_mode == "trigger": - dlc_reader.do_pose_estimation( - video_path, - dlc_model, - project_path, - output_dir, - **analyze_video_params, - ) - dlc_result = dlc_reader.PoseEstimation(output_dir) - creation_time = datetime.fromtimestamp( - dlc_result.creation_time - ).strftime("%Y-%m-%d %H:%M:%S") - - # get video information - _, _, meters_per_pixel, video_time = get_video_path(key) - # check if a position interval exists for this epoch - if interval_list_name := ( - convert_epoch_interval_name_to_position_interval_name( + dlc_result = dlc_reader.PoseEstimation(output_dir) + creation_time = datetime.fromtimestamp( + dlc_result.creation_time + ).strftime("%Y-%m-%d %H:%M:%S") + + logger.info("getting raw position") + interval_list_name = ( + convert_epoch_interval_name_to_position_interval_name( + { + "nwb_file_name": key["nwb_file_name"], + "epoch": key["epoch"], + }, + populate_missing=False, + ) + ) + spatial_series = ( + RawPosition() & {**key, "interval_list_name": interval_list_name} + ).fetch_nwb()[0]["raw_position"] + _, _, _, video_time = get_video_info(key) + pos_time = spatial_series.timestamps + + # TODO: should get timestamps from VideoFile, but need the + # video_frame_ind from RawPosition, which also has timestamps + + key["meters_per_pixel"] = spatial_series.conversion + + # Insert entry into DLCPoseEstimation + logger.info( + "Inserting %s, epoch %02d into DLCPoseEsimation", + key["nwb_file_name"], + key["epoch"], + ) + self.insert1({**key, "pose_estimation_time": creation_time}) + + meters_per_pixel = key.pop("meters_per_pixel") + body_parts = dlc_result.df.columns.levels[0] + body_parts_df = {} + # Insert dlc pose estimation into analysis NWB file for + # each body part. 
+ for body_part in bodyparts: + if body_part in body_parts: + body_parts_df[body_part] = pd.DataFrame.from_dict( { - "nwb_file_name": key["nwb_file_name"], - "epoch": key["epoch"], - }, - populate_missing=False, + c: dlc_result.df.get(body_part).get(c).values + for c in dlc_result.df.get(body_part).columns + } ) - ): - logger.logger.info("Getting raw position") - spatial_series = ( - RawPosition() - & {**key, "interval_list_name": interval_list_name} - ).fetch_nwb()[0]["raw_position"] - else: - spatial_series = None - - key["meters_per_pixel"] = meters_per_pixel - - # Insert entry into DLCPoseEstimation - logger.logger.info( - "Inserting %s, epoch %02d into DLCPoseEsimation", - key["nwb_file_name"], - key["epoch"], + idx = pd.IndexSlice + for body_part, part_df in body_parts_df.items(): + logger.info("converting to cm") + part_df = convert_to_cm(part_df, meters_per_pixel) + logger.info("adding timestamps to DataFrame") + part_df = add_timestamps( + part_df, pos_time=pos_time, video_time=video_time ) - self.insert1({**key, "pose_estimation_time": creation_time}) - meters_per_pixel = key["meters_per_pixel"] - del key["meters_per_pixel"] - body_parts = dlc_result.df.columns.levels[0] - body_parts_df = {} - # Insert dlc pose estimation into analysis NWB file for - # each body part. - for body_part in bodyparts: - if body_part in body_parts: - body_parts_df[body_part] = pd.DataFrame.from_dict( - { - c: dlc_result.df.get(body_part).get(c).values - for c in dlc_result.df.get(body_part).columns - } - ) - idx = pd.IndexSlice - for body_part, part_df in body_parts_df.items(): - logger.logger.info("converting to cm") - key["analysis_file_name"] = AnalysisNwbfile().create( # logged - key["nwb_file_name"] - ) - part_df = convert_to_cm(part_df, meters_per_pixel) - logger.logger.info("adding timestamps to DataFrame") - part_df = add_timestamps( - part_df, - pos_time=getattr(spatial_series, "timestamps", video_time), - video_time=video_time, - ) - key["bodypart"] = body_part - position = pynwb.behavior.Position() - likelihood = pynwb.behavior.BehavioralTimeSeries() - position.create_spatial_series( - name="position", - timestamps=part_df.time.to_numpy(), - conversion=METERS_PER_CM, - data=part_df.loc[:, idx[("x", "y")]].to_numpy(), - reference_frame=getattr( - spatial_series, "reference_frame", "" - ), - comments=getattr(spatial_series, "comments", "no commwnts"), - description="x_position, y_position", - ) - likelihood.create_timeseries( - name="likelihood", - timestamps=part_df.time.to_numpy(), - data=part_df.loc[:, idx["likelihood"]].to_numpy(), - unit="likelihood", - comments="no comments", - description="likelihood", - ) - likelihood.create_timeseries( - name="video_frame_ind", - timestamps=part_df.time.to_numpy(), - data=part_df.loc[:, idx["video_frame_ind"]].to_numpy(), - unit="index", - comments="no comments", - description="video_frame_ind", - ) - nwb_analysis_file = AnalysisNwbfile() - key["dlc_pose_estimation_position_object_id"] = ( - nwb_analysis_file.add_nwb_object( - analysis_file_name=key["analysis_file_name"], - nwb_object=position, - ) - ) - key["dlc_pose_estimation_likelihood_object_id"] = ( - nwb_analysis_file.add_nwb_object( - analysis_file_name=key["analysis_file_name"], - nwb_object=likelihood, - ) + key["bodypart"] = body_part + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) + position = pynwb.behavior.Position() + likelihood = pynwb.behavior.BehavioralTimeSeries() + position.create_spatial_series( + name="position", + 
timestamps=part_df.time.to_numpy(), + conversion=METERS_PER_CM, + data=part_df.loc[:, idx[("x", "y")]].to_numpy(), + reference_frame=spatial_series.reference_frame, + comments=spatial_series.comments, + description="x_position, y_position", + ) + likelihood.create_timeseries( + name="likelihood", + timestamps=part_df.time.to_numpy(), + data=part_df.loc[:, idx["likelihood"]].to_numpy(), + unit="likelihood", + comments="no comments", + description="likelihood", + ) + likelihood.create_timeseries( + name="video_frame_ind", + timestamps=part_df.time.to_numpy(), + data=part_df.loc[:, idx["video_frame_ind"]].to_numpy(), + unit="index", + comments="no comments", + description="video_frame_ind", + ) + nwb_analysis_file = AnalysisNwbfile() + key["dlc_pose_estimation_position_object_id"] = ( + nwb_analysis_file.add_nwb_object( + analysis_file_name=key["analysis_file_name"], + nwb_object=position, ) - nwb_analysis_file.add( - nwb_file_name=key["nwb_file_name"], + ) + key["dlc_pose_estimation_likelihood_object_id"] = ( + nwb_analysis_file.add_nwb_object( analysis_file_name=key["analysis_file_name"], + nwb_object=likelihood, ) - self.BodyPart.insert1(key) - AnalysisNwbfile().log(key, table=self.full_table_name) + ) + nwb_analysis_file.add( + nwb_file_name=key["nwb_file_name"], + analysis_file_name=key["analysis_file_name"], + ) + self.BodyPart.insert1(key) + AnalysisNwbfile().log(key, table=self.full_table_name) def fetch_dataframe(self, *attrs, **kwargs): entries = (self.BodyPart & self).fetch("KEY") @@ -362,12 +366,7 @@ def fetch_dataframe(self, *attrs, **kwargs): ), name="time", ) - COLUMNS = [ - "video_frame_ind", - "x", - "y", - "likelihood", - ] + COLUMNS = ["video_frame_ind", "x", "y", "likelihood"] return pd.concat( { entry["bodypart"]: pd.DataFrame( diff --git a/src/spyglass/position/v1/position_dlc_position.py b/src/spyglass/position/v1/position_dlc_position.py index c18eafd62..cfad61c15 100644 --- a/src/spyglass/position/v1/position_dlc_position.py +++ b/src/spyglass/position/v1/position_dlc_position.py @@ -1,4 +1,4 @@ -from time import time +from pathlib import Path import datajoint as dj import numpy as np @@ -8,15 +8,16 @@ from spyglass.common.common_nwbfile import AnalysisNwbfile from spyglass.position.v1.dlc_utils import ( _key_to_smooth_func_dict, + file_log, get_span_start_stop, + infer_output_dir, interp_pos, validate_option, validate_smooth_params, ) +from spyglass.position.v1.position_dlc_pose_estimation import DLCPoseEstimation from spyglass.settings import test_mode -from spyglass.utils.dj_mixin import SpyglassMixin - -from .position_dlc_pose_estimation import DLCPoseEstimation +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("position_v1_dlc_position") @@ -34,11 +35,12 @@ class DLCSmoothInterpParams(SpyglassMixin, dj.Manual): whether to smooth the dataset smoothing_params : dict smoothing_duration : float, default 0.05 - number of frames to smooth over: sampling_rate*smoothing_duration = num_frames + number of frames to smooth over: + sampling_rate*smoothing_duration = num_frames interp_params : dict max_cm_to_interp : int, default 20 - maximum distance between high likelihood points on either side of a NaN span - to interpolate over + maximum distance between high likelihood points on either side of a + NaN span to interpolate over likelihood_thresh : float, default 0.95 likelihood below which to NaN and interpolate over """ @@ -127,7 +129,7 @@ def insert1(self, key, **kwargs): validate_option( params.get("likelihood_thresh"), name="likelihood_thresh", - 
types=(float), + types=float, val_range=(0, 1), ) @@ -139,8 +141,6 @@ class DLCSmoothInterpSelection(SpyglassMixin, dj.Manual): definition = """ -> DLCPoseEstimation.BodyPart -> DLCSmoothInterpParams - --- - """ @@ -158,125 +158,118 @@ class DLCSmoothInterp(SpyglassMixin, dj.Computed): dlc_smooth_interp_position_object_id : varchar(80) dlc_smooth_interp_info_object_id : varchar(80) """ + log_path = None def make(self, key): - from .dlc_utils import OutputLogger, infer_output_dir + self.log_path = ( + Path(infer_output_dir(key=key, makedir=False)) / "log.log" + ) + self._logged_make(key) + logger.info("inserted entry into DLCSmoothInterp") + + @file_log(logger, console=False) + def _logged_make(self, key): METERS_PER_CM = 0.01 - output_dir = infer_output_dir(key=key, makedir=False) - with OutputLogger( - name=f"{key['nwb_file_name']}_{key['epoch']}_{key['dlc_model_name']}_log", - path=f"{output_dir.as_posix()}/log.log", - print_console=False, - ) as logger: - AnalysisNwbfile()._creation_times["pre_create_time"] = time() - logger.logger.info("-----------------------") - idx = pd.IndexSlice - # Get labels to smooth from Parameters table - params = (DLCSmoothInterpParams() & key).fetch1("params") - # Get DLC output dataframe - logger.logger.info("fetching Pose Estimation Dataframe") - - bp_key = key.copy() - if test_mode: # during testing, analysis_file not in BodyPart table - bp_key.pop("analysis_file_name", None) - - dlc_df = (DLCPoseEstimation.BodyPart() & bp_key).fetch1_dataframe() + logger.info("-----------------------") + idx = pd.IndexSlice + # Get labels to smooth from Parameters table + params = (DLCSmoothInterpParams() & key).fetch1("params") + # Get DLC output dataframe + logger.info("fetching Pose Estimation Dataframe") + + bp_key = key.copy() + if test_mode: # during testing, analysis_file not in BodyPart table + bp_key.pop("analysis_file_name", None) + + dlc_df = (DLCPoseEstimation.BodyPart() & bp_key).fetch1_dataframe() + dt = np.median(np.diff(dlc_df.index.to_numpy())) + logger.info("Identifying indices to NaN") + df_w_nans, bad_inds = nan_inds( + dlc_df.copy(), + max_dist_between=params["max_cm_between_pts"], + likelihood_thresh=params.pop("likelihood_thresh"), + inds_to_span=params["num_inds_to_span"], + ) + + nan_spans = get_span_start_stop(np.where(bad_inds)[0]) + + if interp_params := params.get("interpolate"): + logger.info("interpolating across low likelihood times") + interp_df = interp_pos(df_w_nans.copy(), nan_spans, **interp_params) + else: + interp_df = df_w_nans.copy() + logger.info("skipping interpolation") + + if params.get("smooth"): + smooth_params = params.get("smoothing_params") + smooth_method = smooth_params.get("smooth_method") + smooth_func = _key_to_smooth_func_dict[smooth_method] + dt = np.median(np.diff(dlc_df.index.to_numpy())) - sampling_rate = 1 / dt - logger.logger.info("Identifying indices to NaN") - df_w_nans, bad_inds = nan_inds( - dlc_df.copy(), - params["max_cm_between_pts"], - likelihood_thresh=params.pop("likelihood_thresh"), - inds_to_span=params["num_inds_to_span"], + logger.info(f"Smoothing using method: {smooth_method}") + smooth_df = smooth_func( + interp_df, + smoothing_duration=smooth_params.get("smoothing_duration"), + sampling_rate=1 / dt, + **params["smoothing_params"], ) + else: + smooth_df = interp_df.copy() + logger.info("skipping smoothing") + + final_df = smooth_df.drop(["likelihood"], axis=1) + final_df = final_df.rename_axis("time").reset_index() + position_nwb_data = ( + (DLCPoseEstimation.BodyPart() & bp_key) + 
.fetch_nwb()[0]["dlc_pose_estimation_position"] + .get_spatial_series() + ) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) - nan_spans = get_span_start_stop(np.where(bad_inds)[0]) - if params["interpolate"]: - logger.logger.info("interpolating across low likelihood times") - interp_df = interp_pos( - df_w_nans.copy(), nan_spans, **params["interp_params"] - ) - else: - interp_df = df_w_nans.copy() - logger.logger.info("skipping interpolation") - if params["smooth"]: - if "smoothing_duration" in params["smoothing_params"]: - smoothing_duration = params["smoothing_params"].pop( - "smoothing_duration" - ) - dt = np.median(np.diff(dlc_df.index.to_numpy())) - sampling_rate = 1 / dt - logger.logger.info("smoothing position") - smooth_func = _key_to_smooth_func_dict[ - params["smoothing_params"]["smooth_method"] - ] - logger.logger.info( - "Smoothing using method: %s", - str(params["smoothing_params"]["smooth_method"]), - ) - smooth_df = smooth_func( - interp_df, - smoothing_duration=smoothing_duration, - sampling_rate=sampling_rate, - **params["smoothing_params"], - ) - else: - smooth_df = interp_df.copy() - logger.logger.info("skipping smoothing") - final_df = smooth_df.drop(["likelihood"], axis=1) - final_df = final_df.rename_axis("time").reset_index() - position_nwb_data = ( - (DLCPoseEstimation.BodyPart() & bp_key) - .fetch_nwb()[0]["dlc_pose_estimation_position"] - .get_spatial_series() - ) - key["analysis_file_name"] = AnalysisNwbfile().create( # logged - key["nwb_file_name"] - ) - # Add dataframe to AnalysisNwbfile - nwb_analysis_file = AnalysisNwbfile() - position = pynwb.behavior.Position() - video_frame_ind = pynwb.behavior.BehavioralTimeSeries() - logger.logger.info("Creating NWB objects") - position.create_spatial_series( - name="position", - timestamps=final_df.time.to_numpy(), - conversion=METERS_PER_CM, - data=final_df.loc[:, idx[("x", "y")]].to_numpy(), - reference_frame=position_nwb_data.reference_frame, - comments=position_nwb_data.comments, - description="x_position, y_position", - ) - video_frame_ind.create_timeseries( - name="video_frame_ind", - timestamps=final_df.time.to_numpy(), - data=final_df.loc[:, idx["video_frame_ind"]].to_numpy(), - unit="index", - comments="no comments", - description="video_frame_ind", - ) - key["dlc_smooth_interp_position_object_id"] = ( - nwb_analysis_file.add_nwb_object( - analysis_file_name=key["analysis_file_name"], - nwb_object=position, - ) - ) - key["dlc_smooth_interp_info_object_id"] = ( - nwb_analysis_file.add_nwb_object( - analysis_file_name=key["analysis_file_name"], - nwb_object=video_frame_ind, - ) + # Add dataframe to AnalysisNwbfile + nwb_analysis_file = AnalysisNwbfile() + position = pynwb.behavior.Position() + video_frame_ind = pynwb.behavior.BehavioralTimeSeries() + logger.info("Creating NWB objects") + position.create_spatial_series( + name="position", + timestamps=final_df.time.to_numpy(), + conversion=METERS_PER_CM, + data=final_df.loc[:, idx[("x", "y")]].to_numpy(), + reference_frame=position_nwb_data.reference_frame, + comments=position_nwb_data.comments, + description="x_position, y_position", + ) + video_frame_ind.create_timeseries( + name="video_frame_ind", + timestamps=final_df.time.to_numpy(), + data=final_df.loc[:, idx["video_frame_ind"]].to_numpy(), + unit="index", + comments="no comments", + description="video_frame_ind", + ) + key["dlc_smooth_interp_position_object_id"] = ( + nwb_analysis_file.add_nwb_object( + analysis_file_name=key["analysis_file_name"], + nwb_object=position, ) 
- nwb_analysis_file.add( - nwb_file_name=key["nwb_file_name"], + ) + key["dlc_smooth_interp_info_object_id"] = ( + nwb_analysis_file.add_nwb_object( analysis_file_name=key["analysis_file_name"], + nwb_object=video_frame_ind, ) - self.insert1(key) - logger.logger.info("inserted entry into DLCSmoothInterp") - AnalysisNwbfile().log(key, table=self.full_table_name) + ) + nwb_analysis_file.add( + nwb_file_name=key["nwb_file_name"], + analysis_file_name=key["analysis_file_name"], + ) + self.insert1(key) + AnalysisNwbfile().log(key, table=self.full_table_name) def fetch1_dataframe(self): nwb_data = self.fetch_nwb()[0] @@ -356,6 +349,7 @@ def nan_inds( start_point = good_start[int(len(good_start) // 2)] else: start_point = span[0] + int(span_length(span) // 2) + for ind in range(start_point, span[0], -1): if subthresh_inds_mask[ind]: continue @@ -366,10 +360,11 @@ def nan_inds( ~subthresh_inds_mask[ind + 1 : start_point], ) )[0] - if len(previous_good_inds) >= 1: - last_good_ind = ind + 1 + np.min(previous_good_inds) - else: - last_good_ind = start_point + last_good_ind = ( + ind + 1 + np.min(previous_good_inds) + if len(previous_good_inds) > 0 + else start_point + ) good_x, good_y = dlc_df.loc[ idx[dlc_df.index[last_good_ind]], ["x", "y"] ] @@ -437,36 +432,34 @@ def get_good_spans(bad_inds_mask, inds_to_span: int = 50): modified_spans : list spans that are amended to bridge up to inds_to_span consecutive bad indices """ - good_spans = get_span_start_stop( - np.arange(len(bad_inds_mask))[~bad_inds_mask] - ) - if len(good_spans) > 1: - modified_spans = [] - for (start1, stop1), (start2, stop2) in zip( - good_spans[:-1], good_spans[1:] - ): - check_existing = [ - entry - for entry in modified_spans - if start1 - in range(entry[0] - inds_to_span, entry[1] + inds_to_span) - ] - if len(check_existing) > 0: - modify_ind = modified_spans.index(check_existing[0]) - if (start2 - stop1) <= inds_to_span: - modified_spans[modify_ind] = (check_existing[0][0], stop2) - else: - modified_spans[modify_ind] = (check_existing[0][0], stop1) - modified_spans.append((start2, stop2)) - continue + good = get_span_start_stop(np.arange(len(bad_inds_mask))[~bad_inds_mask]) + + if len(good) < 1: + return None, good + elif len(good) == 1: # if all good, no need to modify + return good, good + + modified_spans = [] + for (start1, stop1), (start2, stop2) in zip(good[:-1], good[1:]): + check_existing = [ + entry + for entry in modified_spans + if start1 in range(entry[0] - inds_to_span, entry[1] + inds_to_span) + ] + if len(check_existing) > 0: + modify_ind = modified_spans.index(check_existing[0]) if (start2 - stop1) <= inds_to_span: - modified_spans.append((start1, stop2)) + modified_spans[modify_ind] = (check_existing[0][0], stop2) else: - modified_spans.append((start1, stop1)) + modified_spans[modify_ind] = (check_existing[0][0], stop1) modified_spans.append((start2, stop2)) - return good_spans, modified_spans - else: - return None, good_spans + continue + if (start2 - stop1) <= inds_to_span: + modified_spans.append((start1, stop2)) + else: + modified_spans.append((start1, stop1)) + modified_spans.append((start2, stop2)) + return good, modified_spans def span_length(x): diff --git a/src/spyglass/position/v1/position_dlc_project.py b/src/spyglass/position/v1/position_dlc_project.py index 87ca4fab7..f2d377aef 100644 --- a/src/spyglass/position/v1/position_dlc_project.py +++ b/src/spyglass/position/v1/position_dlc_project.py @@ -1,20 +1,17 @@ import copy -import glob -import os import shutil from itertools import combinations 
from pathlib import Path, PosixPath from typing import Dict, List, Union import datajoint as dj -import numpy as np import pandas as pd -import ruamel.yaml +from ruamel.yaml import YAML from spyglass.common.common_lab import LabTeam -from spyglass.position.v1.dlc_utils import check_videofile, get_video_path +from spyglass.position.v1.dlc_utils import find_mp4, get_video_info from spyglass.settings import dlc_project_dir, dlc_video_dir -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("position_v1_dlc_project") @@ -51,7 +48,7 @@ def add_from_config(cls, bodyparts: List, descriptions: List = None): bodyparts_dict = [ {"bodypart": bp, "bodypart_description": bp} for bp in bodyparts ] - cls.insert(bodyparts_dict, skip_duplicates=True) + cls().insert(bodyparts_dict, skip_duplicates=True) @schema @@ -90,14 +87,20 @@ class File(SpyglassMixin, dj.Part): """ def insert1(self, key, **kwargs): - assert isinstance( - key["project_name"], str - ), "project_name must be a string" - assert isinstance( - key["frames_per_video"], int - ), "frames_per_video must be of type `int`" + if not isinstance(key["project_name"], str): + raise ValueError("project_name must be a string") + if not isinstance(key["frames_per_video"], int): + raise ValueError("frames_per_video must be of type `int`") super().insert1(key, **kwargs) + def _existing_project(self, project_name): + if project_name in self.fetch("project_name"): + logger.warning(f"project name: {project_name} is already in use.") + return (self & {"project_name": project_name}).fetch( + "project_name", "config_path", as_dict=True + )[0] + return None + @classmethod def insert_existing_project( cls, @@ -123,46 +126,43 @@ def insert_existing_project( optional list of bodyparts to label that are not already in existing config """ - - # Read config - project_names_in_use = np.unique(cls.fetch("project_name")) - if project_name in project_names_in_use: - print(f"project name: {project_name} is already in use.") - return_key = {} - return_key["project_name"], return_key["config_path"] = ( - cls & {"project_name": project_name} - ).fetch1("project_name", "config_path") - return return_key from deeplabcut.utils.auxiliaryfunctions import read_config + if (existing := cls()._existing_project(project_name)) is not None: + return existing + cfg = read_config(config_path) + all_bodyparts = cfg["bodyparts"] if bodyparts: bodyparts_to_add = [ bodypart for bodypart in bodyparts if bodypart not in cfg["bodyparts"] ] - all_bodyparts = bodyparts_to_add + cfg["bodyparts"] - else: - all_bodyparts = cfg["bodyparts"] + all_bodyparts += bodyparts_to_add + BodyPart.add_from_config(cfg["bodyparts"]) for bodypart in all_bodyparts: if not bool(BodyPart() & {"bodypart": bodypart}): raise ValueError( f"bodypart: {bodypart} not found in BodyPart table" ) + # check bodyparts are in config, if not add if len(bodyparts_to_add) > 0: add_to_config(config_path, bodyparts=bodyparts_to_add) + # Get frames per video from config. 
If passed as arg, check match if frames_per_video: if frames_per_video != cfg["numframes2pick"]: add_to_config( config_path, **{"numframes2pick": frames_per_video} ) + config_path = Path(config_path) project_path = config_path.parent dlc_project_path = dlc_project_dir + if dlc_project_path not in project_path.as_posix(): project_dirname = project_path.name dest_folder = Path(f"{dlc_project_path}/{project_dirname}/") @@ -179,6 +179,7 @@ def insert_existing_project( ), "config.yaml does not exist in new project directory" config_path = new_config_path add_to_config(config_path, **{"project_path": new_proj_dir}) + # TODO still need to copy videos over to video dir key = { "project_name": project_name, @@ -187,21 +188,17 @@ def insert_existing_project( "config_path": config_path.as_posix(), "frames_per_video": frames_per_video, } - cls.insert1(key, **kwargs) - cls.BodyPart.insert( + cls().insert1(key, **kwargs) + cls().BodyPart.insert( [ {"project_name": project_name, "bodypart": bp} for bp in all_bodyparts ], **kwargs, ) - if add_to_files: - del key["bodyparts"] - del key["team_name"] - del key["config_path"] - del key["frames_per_video"] - # Check for training files to add - cls.add_training_files(key, **kwargs) + if add_to_files: # Check for training files to add + cls().add_training_files(key, **kwargs) + return { "project_name": project_name, "config_path": config_path.as_posix(), @@ -245,68 +242,24 @@ def insert_new_project( target path to output converted videos (Default is '/nimbus/deeplabcut/videos/') """ - project_names_in_use = np.unique(cls.fetch("project_name")) - if project_name in project_names_in_use: - print(f"project name: {project_name} is already in use.") - return_key = {} - return_key["project_name"], return_key["config_path"] = ( - cls & {"project_name": project_name} - ).fetch1("project_name", "config_path") - return return_key + from deeplabcut import create_new_project - add_to_files = kwargs.pop("add_to_files", True) + if (existing := cls()._existing_project(project_name)) is not None: + return existing if not bool(LabTeam() & {"team_name": lab_team}): - raise ValueError(f"team_name: {lab_team} does not exist in LabTeam") + raise ValueError(f"LabTeam does not exist: {lab_team}") + + add_to_files = kwargs.pop("add_to_files", True) skeleton_node = None # If dict, assume of form {'nwb_file_name': nwb_file_name, 'epoch': epoch} # and pass to get_video_path to reference VideoFile table for path - if all(isinstance(n, Dict) for n in video_list): - videos_to_convert = [ - get_video_path(video_key) for video_key in video_list - ] - videos = [ - check_videofile( - video_path=video[0], - output_path=output_path, - video_filename=video[1], - )[0].as_posix() - for video in videos_to_convert - if video[0] is not None - ] - if len(videos) < 1: - raise ValueError( - f"no .mp4 videos found in {videos_to_convert[0][0]}" - + f" for key: {video_list[0]}" - ) - - # If not dict, assume list of video file paths that may or may not need to be converted - else: - videos = [] - if not all([Path(video).exists() for video in video_list]): - raise OSError("at least one file in video_list does not exist") - for video in video_list: - video_path = Path(video).parent - video_filename = video.rsplit( - video_path.as_posix(), maxsplit=1 - )[-1].split("/")[-1] - videos.extend( - [ - check_videofile( - video_path=video_path, - output_path=output_path, - video_filename=video_filename, - )[0].as_posix() - ] - ) - if len(videos) < 1: - raise ValueError(f"no .mp4 videos found in{video_path}") - from 
deeplabcut import create_new_project + videos = cls()._process_videos(video_list, output_path) config_path = create_new_project( - project_name, - lab_team, - videos, + project=project_name, + experimenter=sanitize_filename(lab_team), + videos=videos, working_directory=project_directory, copy_videos=True, multianimal=False, @@ -318,9 +271,11 @@ def insert_new_project( ) kwargs_copy = copy.deepcopy(kwargs) kwargs_copy.update({"numframes2pick": frames_per_video, "dotsize": 3}) + add_to_config( config_path, bodyparts, skeleton_node=skeleton_node, **kwargs_copy ) + key = { "project_name": project_name, "team_name": lab_team, @@ -328,133 +283,138 @@ def insert_new_project( "config_path": config_path, "frames_per_video": frames_per_video, } - cls.insert1(key, **kwargs) - cls.BodyPart.insert( + cls().insert1(key, **kwargs) + cls().BodyPart.insert( [ {"project_name": project_name, "bodypart": bp} for bp in bodyparts ], **kwargs, ) - if add_to_files: - del key["bodyparts"] - del key["team_name"] - del key["config_path"] - del key["frames_per_video"] - # Add videos to training files - cls.add_training_files(key, **kwargs) + if add_to_files: # Add videos to training files + cls().add_training_files(key, **kwargs) + if isinstance(config_path, PosixPath): config_path = config_path.as_posix() return {"project_name": project_name, "config_path": config_path} + def _process_videos(self, video_list, output_path): + # If dict, assume {'nwb_file_name': nwb_file_name, 'epoch': epoch} + if all(isinstance(n, Dict) for n in video_list): + videos_to_convert = [] + for video in video_list: + if (video_path := get_video_info(video))[0] is not None: + videos_to_convert.append(video_path) + + else: # Otherwise, assume list of video file paths + if not all([Path(video).exists() for video in video_list]): + raise FileNotFoundError(f"Couldn't find video(s): {video_list}") + videos_to_convert = [] + for video in video_list: + vp = Path(video) + videos_to_convert.append((vp.parent, vp.name)) + + videos = [ + find_mp4( + video_path=video[0], + output_path=output_path, + video_filename=video[1], + ) + for video in videos_to_convert + ] + + if len(videos) < 1: + raise ValueError(f"no .mp4 videos found from {video_list}") + + return videos + @classmethod def add_video_files( cls, video_list, config_path=None, key=None, - output_path: str = os.getenv("DLC_VIDEO_PATH"), + output_path: str = dlc_video_dir, add_new=False, add_to_files=True, **kwargs, ): has_config_or_key = bool(config_path) or bool(key) - if add_new and not has_config_or_key: raise ValueError("If add_new, must provide key or config_path") - config_path = config_path or (cls & key).fetch1("config_path") - if ( - add_to_files - and not key - and len(cls & {"config_path": config_path}) != 1 - ): + config_path = config_path or (cls & key).fetch1("config_path") + has_proj = bool(key) or len(cls & {"config_path": config_path}) == 1 + if add_to_files and not has_proj: raise ValueError("Cannot set add_to_files=True without passing key") - if all(isinstance(n, Dict) for n in video_list): - videos_to_convert = [ - get_video_path(video_key) for video_key in video_list - ] - videos = [ - check_videofile( - video_path=video[0], - output_path=output_path, - video_filename=video[1], - )[0].as_posix() - for video in videos_to_convert - ] - # If not dict, assume list of video file paths - # that may or may not need to be converted - else: - videos = [] - if not all([Path(video).exists() for video in video_list]): - raise OSError("at least one file in video_list does not exist") - 
for video in video_list:
-                video_path = Path(video).parent
-                video_filename = video.rsplit(
-                    video_path.as_posix(), maxsplit=1
-                )[-1].split("/")[-1]
-                videos.append(
-                    check_videofile(
-                        video_path=video_path,
-                        output_path=output_path,
-                        video_filename=video_filename,
-                    )[0].as_posix()
-                )
-            if len(videos) < 1:
-                raise ValueError(f"no .mp4 videos found in{video_path}")
+        videos = cls()._process_videos(video_list, output_path)
+
         if add_new:
             from deeplabcut import add_new_videos

             add_new_videos(config=config_path, videos=videos, copy_videos=True)
-        if add_to_files:
-            # Add videos to training files
-            cls.add_training_files(key, **kwargs)
+
+        if add_to_files:  # Add videos to training files
+            cls().add_training_files(key, **kwargs)
         return videos

     @classmethod
     def add_training_files(cls, key, **kwargs):
         """Add training videos and labeled frames .h5 and .csv to DLCProject.File"""
+        from deeplabcut.utils.auxiliaryfunctions import read_config
+
         config_path = (cls & {"project_name": key["project_name"]}).fetch1(
             "config_path"
         )
-        from deeplabcut.utils.auxiliaryfunctions import read_config

-        if "config_path" in key:
-            del key["config_path"]
+        key = {  # Remove non-essential vals from key
+            k: v
+            for k, v in key.items()
+            if k
+            not in [
+                "bodyparts",
+                "team_name",
+                "config_path",
+                "frames_per_video",
+            ]
+        }
+
         cfg = read_config(config_path)
-        video_names = list(cfg["video_sets"].keys())
+        video_names = list(cfg["video_sets"])
+        label_dir = Path(cfg["project_path"]) / "labeled-data"
         training_files = []
+
+        video_inserts = []
         for video in video_names:
-            video_name = os.path.splitext(
-                video.split(os.path.dirname(video) + "/")[-1]
-            )[0]
-            training_files.extend(
-                glob.glob(
-                    f"{cfg['project_path']}/"
-                    f"labeled-data/{video_name}/*Collected*"
-                )
+            vid_path_obj = Path(video)
+            video_name = vid_path_obj.stem
+            training_files.extend((label_dir / video_name).glob("*Collected*"))
+            video_inserts.append(  # collect one File entry per training video
+                {
+                    **key,
+                    "file_name": video_name,
+                    "file_ext": vid_path_obj.suffix[1:],  # remove leading '.'
+                    "file_path": video,
+                }
+            )
+        cls().File.insert(video_inserts, **kwargs)
+
+        if len(training_files) == 0:
+            logger.warning("No training files to add")
+            return
+
+        for file in training_files:
+            path_obj = Path(file)
+            cls().File.insert1(
+                {
+                    **key,
+                    "file_name": f"{path_obj.name}_labeled_data",
+                    "file_ext": path_obj.suffix[1:],
+                    "file_path": file,
+                },
+                **kwargs,
             )
-        for video in video_names:
-            key["file_name"] = f'{os.path.splitext(video.split("/")[-1])[0]}'
-            key["file_ext"] = os.path.splitext(video.split("/")[-1])[-1].split(
-                "." 
- )[-1] - key["file_path"] = video - cls.File.insert1(key, **kwargs) - if len(training_files) > 0: - for file in training_files: - video_name = os.path.dirname(file).split("/")[-1] - file_type = os.path.splitext( - file.split(os.path.dirname(file) + "/")[-1] - )[-1].split(".")[-1] - key["file_name"] = f"{video_name}_labeled_data" - key["file_ext"] = file_type - key["file_path"] = file - cls.File.insert1(key, **kwargs) - else: - Warning("No training files to add") @classmethod def run_extract_frames(cls, key, **kwargs): @@ -474,7 +434,11 @@ def run_label_frames(cls, key): cannot be run through ssh tunnel """ config_path = (cls & key).fetch1("config_path") - from deeplabcut import label_frames + try: + from deeplabcut import label_frames + except (ModuleNotFoundError, ImportError): + logger.error("DLC loaded in light mode, cannot label frames") + return label_frames(config_path) @@ -492,7 +456,7 @@ def check_labels(cls, key, **kwargs): def import_labeled_frames( cls, key: Dict, - import_project_path: Union[str, PosixPath], + new_proj_path: Union[str, PosixPath], video_filenames: Union[str, List], **kwargs, ): @@ -503,63 +467,46 @@ def import_labeled_frames( ---------- key : Dict key to specify entry in DLCProject table to add labeled frames to - import_project_path : str + new_proj_path : Union[str, PosixPath] absolute path to project directory containing labeled frames to import video_filenames : str or List - filename or list of filenames of video(s) - from which to import frames. - without file extension + filename or list of filenames of video(s) from which to import + frames. Without file extension """ project_entry = (cls & key).fetch1() - team_name = project_entry["team_name"] - current_project_path = Path(project_entry["config_path"]).parent - current_labeled_data_path = Path( - f"{current_project_path.as_posix()}/labeled-data" + team_name = project_entry["team_name"].replace(" ", "_") + this_proj_path = Path(project_entry["config_path"]).parent + this_data_path = this_proj_path / "labeled-data" + new_proj_path = Path(new_proj_path) # If Path(Path), no change + new_data_path = new_proj_path / "labeled-data" + + if not new_data_path.exists(): + raise FileNotFoundError(f"Cannot find directory: {new_data_path}") + + videos = ( + video_filenames + if isinstance(video_filenames, List) + else [video_filenames] ) - if isinstance(import_project_path, PosixPath): - assert import_project_path.exists(), ( - "import_project_path: " - f"{import_project_path.as_posix()} does not exist" - ) - import_labeled_data_path = Path( - f"{import_project_path.as_posix()}/labeled-data" - ) - else: - assert Path( - import_project_path - ).exists(), ( - f"import_project_path: {import_project_path} does not exist" - ) - import_labeled_data_path = Path( - f"{import_project_path}/labeled-data" - ) - assert ( - import_labeled_data_path.exists() - ), "import_project has no directory 'labeled-data'" - if not isinstance(video_filenames, List): - video_filenames = [video_filenames] - for video_file in video_filenames: - h5_file = glob.glob( - f"{import_labeled_data_path.as_posix()}/{video_file}/*.h5" - )[0] + for video_file in videos: + h5_file = next((new_data_path / video_file).glob("*h5")) dlc_df = pd.read_hdf(h5_file) dlc_df.columns = dlc_df.columns.set_levels([team_name], level=0) + new_video_path = this_data_path / video_file + new_video_path.mkdir(exist_ok=True) dlc_df.to_hdf( - Path( - f"{current_labeled_data_path.as_posix()}/" - f"{video_file}/CollectedData_{team_name}.h5" - ).as_posix(), + new_video_path / 
f"CollectedData_{team_name}.h5", "df_with_missing", ) - cls.add_training_files(key, **kwargs) + cls().add_training_files(key, **kwargs) def add_to_config( config, bodyparts: List = None, skeleton_node: str = None, **kwargs ): - """ - Add necessary items to the config.yaml for the model + """Add necessary items to the config.yaml for the model + Parameters ---------- config : str @@ -572,28 +519,43 @@ def add_to_config( Other parameters of config to modify in key:value pairs """ - yaml = ruamel.yaml.YAML() + yaml = YAML() with open(config) as fp: data = yaml.load(fp) + if bodyparts: data["bodyparts"] = bodyparts - led_parts = [element for element in bodyparts if "LED" in element] - if skeleton_node is not None: - bodypart_skeleton = [ + led_parts = [bp for bp in bodyparts if "LED" in bp] + bodypart_skeleton = ( + [ list(link) for link in combinations(led_parts, 2) if skeleton_node in link ] - else: - bodypart_skeleton = list(combinations(led_parts, 2)) + if skeleton_node + else list(combinations(led_parts, 2)) + ) other_parts = list(set(bodyparts) - set(led_parts)) for ind, part in enumerate(other_parts): other_parts[ind] = [part, part] bodypart_skeleton.append(other_parts) data["skeleton"] = bodypart_skeleton - for kwarg, val in kwargs.items(): - if not isinstance(kwarg, str): - kwarg = str(kwarg) - data[kwarg] = val + + kwargs.update( + {str(k): v for k, v in kwargs.items() if not isinstance(k, str)} + ) + with open(config, "w") as fw: yaml.dump(data, fw) + + +def sanitize_filename(filename: str) -> str: + """Sanitize filename to remove special characters""" + char_map = { + " ": "_", + ".": "_", + ",": "-", + "&": "and", + "'": "", + } + return "".join([char_map.get(c, c) for c in filename]) diff --git a/src/spyglass/position/v1/position_dlc_selection.py b/src/spyglass/position/v1/position_dlc_selection.py index 8a283bb1d..7d90a6d2a 100644 --- a/src/spyglass/position/v1/position_dlc_selection.py +++ b/src/spyglass/position/v1/position_dlc_selection.py @@ -12,7 +12,7 @@ convert_epoch_interval_name_to_position_interval_name, ) from spyglass.common.common_nwbfile import AnalysisNwbfile -from spyglass.position.v1.dlc_utils import make_video +from spyglass.position.v1.dlc_utils_makevid import make_video from spyglass.position.v1.position_dlc_centroid import DLCCentroid from spyglass.position.v1.position_dlc_cohort import DLCSmoothInterpCohort from spyglass.position.v1.position_dlc_orient import DLCOrientation @@ -21,7 +21,7 @@ DLCPoseEstimationSelection, ) from spyglass.position.v1.position_dlc_position import DLCSmoothInterpParams -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("position_v1_dlc_selection") @@ -106,39 +106,46 @@ def make(self, key): velocity.create_timeseries( name=vid_frame_obj.name, - unit=vid_frame_obj.unit, timestamps=np.asarray(vid_frame_obj.timestamps), + unit=vid_frame_obj.unit, data=np.asarray(vid_frame_obj.data), description=vid_frame_obj.description, comments=vid_frame_obj.comments, ) - key["analysis_file_name"] = AnalysisNwbfile().create( - key["nwb_file_name"] - ) + # Add to Analysis NWB file + analysis_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) + key["analysis_file_name"] = analysis_file_name nwb_analysis_file = AnalysisNwbfile() - key["orientation_object_id"] = nwb_analysis_file.add_nwb_object( - key["analysis_file_name"], orientation - ) - key["position_object_id"] = nwb_analysis_file.add_nwb_object( - key["analysis_file_name"], position - ) - key["velocity_object_id"] = 
nwb_analysis_file.add_nwb_object( - key["analysis_file_name"], velocity + + key.update( + { + "analysis_file_name": analysis_file_name, + "position_object_id": nwb_analysis_file.add_nwb_object( + analysis_file_name, position + ), + "orientation_object_id": nwb_analysis_file.add_nwb_object( + analysis_file_name, orientation + ), + "velocity_object_id": nwb_analysis_file.add_nwb_object( + analysis_file_name, velocity + ), + } ) nwb_analysis_file.add( nwb_file_name=key["nwb_file_name"], - analysis_file_name=key["analysis_file_name"], + analysis_file_name=analysis_file_name, ) self.insert1(key) from ..position_merge import PositionOutput - part_name = to_camel_case(self.table_name.split("__")[-1]) # TODO: The next line belongs in a merge table function PositionOutput._merge_insert( - [orig_key], part_name=part_name, skip_duplicates=True + [orig_key], + part_name=to_camel_case(self.table_name.split("__")[-1]), + skip_duplicates=True, ) AnalysisNwbfile().log(key, table=self.full_table_name) @@ -187,23 +194,24 @@ def fetch_nwb(self, **kwargs): @classmethod def evaluate_pose_estimation(cls, key): likelihood_thresh = [] - valid_fields = ( - DLCSmoothInterpCohort.BodyPart().fetch().dtype.fields.keys() - ) + + valid_fields = DLCSmoothInterpCohort.BodyPart().heading.names centroid_key = {k: val for k, val in key.items() if k in valid_fields} centroid_key["dlc_si_cohort_selection_name"] = key[ "dlc_si_cohort_centroid" ] + centroid_bodyparts, centroid_si_params = ( + DLCSmoothInterpCohort.BodyPart & centroid_key + ).fetch("bodypart", "dlc_si_params_name") + orientation_key = centroid_key.copy() orientation_key["dlc_si_cohort_selection_name"] = key[ "dlc_si_cohort_orientation" ] - centroid_bodyparts, centroid_si_params = ( - DLCSmoothInterpCohort.BodyPart & centroid_key - ).fetch("bodypart", "dlc_si_params_name") orientation_bodyparts, orientation_si_params = ( DLCSmoothInterpCohort.BodyPart & orientation_key ).fetch("bodypart", "dlc_si_params_name") + for param in np.unique( np.concatenate((centroid_si_params, orientation_si_params)) ): @@ -215,9 +223,10 @@ def evaluate_pose_estimation(cls, key): if len(np.unique(likelihood_thresh)) > 1: raise ValueError("more than one likelihood threshold used") + like_thresh = likelihood_thresh[0] bodyparts = np.unique([*centroid_bodyparts, *orientation_bodyparts]) - fields = list(DLCPoseEstimation.BodyPart.fetch().dtype.fields.keys()) + fields = DLCPoseEstimation.BodyPart.heading.names pose_estimation_key = {k: v for k, v in key.items() if k in fields} pose_estimation_df = pd.concat( { @@ -308,134 +317,108 @@ class DLCPosVideo(SpyglassMixin, dj.Computed): --- """ - # TODO: Shoultn't this keep track of the video file it creates? 
- def make(self, key): - from tqdm import tqdm as tqdm + M_TO_CM = 100 params = (DLCPosVideoParams & key).fetch1("params") - if "video_params" not in params: - params["video_params"] = {} - M_TO_CM = 100 - epoch = key["epoch"] - pose_estimation_key = { + + interval_name = convert_epoch_interval_name_to_position_interval_name( + { + "nwb_file_name": key["nwb_file_name"], + "epoch": key["epoch"], + }, + populate_missing=False, + ) + epoch = ( + int(interval_name.replace("pos ", "").replace(" valid times", "")) + + 1 + ) + pose_est_key = { "nwb_file_name": key["nwb_file_name"], "epoch": epoch, "dlc_model_name": key["dlc_model_name"], "dlc_model_params_name": key["dlc_model_params_name"], } - pose_estimation_params, video_filename, output_dir = ( - DLCPoseEstimationSelection() & pose_estimation_key + + pose_estimation_params, video_filename, output_dir, meters_per_pixel = ( + DLCPoseEstimationSelection * DLCPoseEstimation & pose_est_key ).fetch1( - "pose_estimation_params", "video_path", "pose_estimation_output_dir" - ) - print(f"video filename: {video_filename}") - meters_per_pixel = (DLCPoseEstimation() & pose_estimation_key).fetch1( - "meters_per_pixel" + "pose_estimation_params", + "video_path", + "pose_estimation_output_dir", + "meters_per_pixel", ) - crop = None - if "cropping" in pose_estimation_params: - crop = pose_estimation_params["cropping"] - print("Loading position data...") - position_info_df = ( - DLCPosV1() - & { - "nwb_file_name": key["nwb_file_name"], - "epoch": epoch, - "dlc_si_cohort_centroid": key["dlc_si_cohort_centroid"], - "dlc_centroid_params_name": key["dlc_centroid_params_name"], - "dlc_si_cohort_orientation": key["dlc_si_cohort_orientation"], - "dlc_orientation_params_name": key[ - "dlc_orientation_params_name" - ], - } + + logger.info(f"video filename: {video_filename}") + logger.info("Loading position data...") + + v1_key = {k: v for k, v in key.items() if k in DLCPosV1.primary_key} + pos_info_df = ( + DLCPosV1() & {"epoch": epoch, **v1_key} ).fetch1_dataframe() - pose_estimation_df = pd.concat( + pos_est_df = pd.concat( { bodypart: ( DLCPoseEstimation.BodyPart() - & {**pose_estimation_key, **{"bodypart": bodypart}} + & {**pose_est_key, **{"bodypart": bodypart}} ).fetch1_dataframe() - for bodypart in ( - DLCSmoothInterpCohort.BodyPart & pose_estimation_key - ) + for bodypart in (DLCSmoothInterpCohort.BodyPart & pose_est_key) .fetch("bodypart") .tolist() }, axis=1, ) - assert len(pose_estimation_df) == len(position_info_df), ( - f"length of pose_estimation_df: {len(pose_estimation_df)} " - f"does not match the length of position_info_df: {len(position_info_df)}." 
- ) + if not len(pos_est_df) == len(pos_info_df): + raise ValueError( + "Dataframes are not the same length\n" + + f"\tPose estim : {len(pos_est_df)}\n" + + f"\tPosition info: {len(pos_info_df)}" + ) - nwb_base_filename = key["nwb_file_name"].replace(".nwb", "") + output_video_filename = ( + key["nwb_file_name"].replace(".nwb", "") + + f"_{epoch:02d}_" + + f'{key["dlc_si_cohort_centroid"]}_' + + f'{key["dlc_centroid_params_name"]}' + + f'{key["dlc_orientation_params_name"]}.mp4' + ) if Path(output_dir).exists(): - output_video_filename = ( - f"{Path(output_dir).as_posix()}/" - f"{nwb_base_filename}_{epoch:02d}_" - f'{key["dlc_si_cohort_centroid"]}_' - f'{key["dlc_centroid_params_name"]}' - f'{key["dlc_orientation_params_name"]}.mp4' - ) - else: - output_video_filename = ( - f"{nwb_base_filename}_{epoch:02d}_" - f'{key["dlc_si_cohort_centroid"]}_' - f'{key["dlc_centroid_params_name"]}' - f'{key["dlc_orientation_params_name"]}.mp4' - ) + output_video_filename = Path(output_dir) / output_video_filename + idx = pd.IndexSlice - video_frame_inds = ( - position_info_df["video_frame_ind"].astype(int).to_numpy() - ) + video_frame_inds = pos_info_df["video_frame_ind"].astype(int).to_numpy() centroids = { - bodypart: pose_estimation_df.loc[ - :, idx[bodypart, ("x", "y")] - ].to_numpy() - for bodypart in pose_estimation_df.columns.levels[0] + bodypart: pos_est_df.loc[:, idx[bodypart, ("x", "y")]].to_numpy() + for bodypart in pos_est_df.columns.levels[0] } - if params.get("incl_likelihood", None): - likelihoods = { - bodypart: pose_estimation_df.loc[ + likelihoods = ( + { + bodypart: pos_est_df.loc[ :, idx[bodypart, ("likelihood")] ].to_numpy() - for bodypart in pose_estimation_df.columns.levels[0] + for bodypart in pos_est_df.columns.levels[0] } - else: - likelihoods = None - position_mean = { - "DLC": np.asarray(position_info_df[["position_x", "position_y"]]) - } - orientation_mean = { - "DLC": np.asarray(position_info_df[["orientation"]]) - } - position_time = np.asarray(position_info_df.index) - cm_per_pixel = meters_per_pixel * M_TO_CM - percent_frames = params.get("percent_frames", None) + if params.get("incl_likelihood") + else None + ) frames = params.get("frames", None) - if frames is not None: - frames_arr = np.arange(frames[0], frames[1]) - else: - frames_arr = frames - print("Making video...") make_video( video_filename=video_filename, video_frame_inds=video_frame_inds, - position_mean=position_mean, - orientation_mean=orientation_mean, + position_mean={ + "DLC": np.asarray(pos_info_df[["position_x", "position_y"]]) + }, + orientation_mean={"DLC": np.asarray(pos_info_df[["orientation"]])}, centroids=centroids, likelihoods=likelihoods, - position_time=position_time, - video_time=None, + position_time=np.asarray(pos_info_df.index), processor=params.get("processor", "opencv"), - frames=frames_arr, - percent_frames=percent_frames, + frames=np.arange(frames[0], frames[1]) if frames else None, + percent_frames=params.get("percent_frames", None), output_video_filename=output_video_filename, - cm_to_pixels=cm_per_pixel, - disable_progressbar=False, - crop=crop, - **params["video_params"], + cm_to_pixels=meters_per_pixel * M_TO_CM, + crop=pose_estimation_params.get("cropping"), + **params.get("video_params", {}), ) self.insert1(key) diff --git a/src/spyglass/position/v1/position_dlc_training.py b/src/spyglass/position/v1/position_dlc_training.py index 393eb6af9..7876754f5 100644 --- a/src/spyglass/position/v1/position_dlc_training.py +++ b/src/spyglass/position/v1/position_dlc_training.py @@ -4,10 
+4,10 @@ import datajoint as dj -from spyglass.position.v1.dlc_utils import OutputLogger +from spyglass.position.v1.dlc_utils import file_log from spyglass.position.v1.position_dlc_project import DLCProject from spyglass.settings import test_mode -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("position_v1_dlc_training") @@ -22,13 +22,13 @@ class DLCModelTrainingParams(SpyglassMixin, dj.Lookup): params : longblob # dictionary of all applicable parameters """ - required_parameters = ( + required_params = ( "shuffle", "trainingsetindex", "net_type", "gputouse", ) - skipped_parameters = ("project_path", "video_sets") + skipped_params = ("project_path", "video_sets") @classmethod def insert_new_params(cls, paramset_name: str, params: dict, **kwargs): @@ -45,76 +45,52 @@ def insert_new_params(cls, paramset_name: str, params: dict, **kwargs): project_path and video_sets will be overwritten by config.yaml. Note that trainingsetindex is 0-indexed """ + if not set(cls.required_params).issubset(params): + raise ValueError(f"Missing required params: {cls.required_params}") + params = { + k: v for k, v in params.items() if k not in cls.skipped_params + } - for required_param in cls.required_parameters: - assert required_param in params, ( - "Missing required parameter: " + required_param - ) - for skipped_param in cls.skipped_parameters: - if skipped_param in params: - params.pop(skipped_param) + param_pk = {"dlc_training_params_name": paramset_name} + param_query = cls & param_pk - param_dict = { - "dlc_training_params_name": paramset_name, - "params": params, - } - param_query = cls & { - "dlc_training_params_name": param_dict["dlc_training_params_name"] - } - # If the specified param-set already exists - # Not sure we need this part, as much just a check if the name is the same if param_query: - existing_paramset_name = param_query.fetch1( - "dlc_training_params_name" + logger.info( + f"New param set not added\n" + f"A param set with name: {paramset_name} already exists" ) - if ( - existing_paramset_name == paramset_name - ): # If existing name same: - return print( - f"New param set not added\n" - f"A param set with name: {paramset_name} already exists" - ) - else: - cls.insert1( - param_dict, **kwargs - ) # if duplicate, will raise duplicate error - # if this will raise duplicate error, why is above check needed? 
@datajoint + return + cls.insert1({**param_pk, "params": params}, **kwargs) @classmethod def get_accepted_params(cls): from deeplabcut import create_training_dataset, train_network - return list( - set( - [ - *list(inspect.signature(train_network).parameters), - *list( - inspect.signature(create_training_dataset).parameters - ), - ] - ) + return set( + [ + *get_param_names(train_network), + *get_param_names(create_training_dataset), + ] ) @schema class DLCModelTrainingSelection(SpyglassMixin, dj.Manual): - definition = """ # Specification for a DLC model training instance + definition = """ # Specification for a DLC model training instance -> DLCProject -> DLCModelTrainingParams - training_id : int # unique integer, + training_id : int # unique integer # allows for multiple training runs for a specific parameter set and project --- model_prefix='' : varchar(32) """ - def insert1(self, key, **kwargs): - training_id = key.get("training_id") - if training_id is None: + def insert1(self, key, **kwargs): # Auto-increment training_id + if not (training_id := key.get("training_id")): training_id = ( dj.U().aggr(self & key, n="max(training_id)").fetch1("n") or 0 ) + 1 - key["training_id"] = training_id - super().insert1(key, **kwargs) + super().insert1({**key, "training_id": training_id}, **kwargs) @schema @@ -126,13 +102,20 @@ class DLCModelTraining(SpyglassMixin, dj.Computed): latest_snapshot: int unsigned # latest exact snapshot index (i.e., never -1) config_template: longblob # stored full config file """ + log_path = None - # To continue from previous training snapshot, devs suggest editing pose_cfg.yml + # To continue from previous training snapshot, + # devs suggest editing pose_cfg.yml # https://github.com/DeepLabCut/DeepLabCut/issues/70 def make(self, key): - """Launch training for each entry in DLCModelTrainingSelection via `.populate()`.""" - model_prefix = (DLCModelTrainingSelection & key).fetch1("model_prefix") + """Launch training for each entry in DLCModelTrainingSelection.""" + config_path = (DLCProject & key).fetch1("config_path") + self.log_path = Path(config_path).parent / "log.log" + self._logged_make(key) + + @file_log(logger, console=True) # THIS WORKS + def _logged_make(self, key): from deeplabcut import create_training_dataset, train_network from deeplabcut.utils.auxiliaryfunctions import read_config @@ -144,111 +127,106 @@ def make(self, key): from deeplabcut.utils.auxiliaryfunctions import ( GetModelFolder as get_model_folder, ) + + model_prefix = (DLCModelTrainingSelection & key).fetch1("model_prefix") config_path, project_name = (DLCProject() & key).fetch1( "config_path", "project_name" ) - with OutputLogger( - name="DLC_project_{project_name}_training", - path=f"{os.path.dirname(config_path)}/log.log", - print_console=True, - ) as logger: - dlc_config = read_config(config_path) - project_path = dlc_config["project_path"] - key["project_path"] = project_path - # ---- Build and save DLC configuration (yaml) file ---- - _, dlc_config = dlc_reader.read_yaml(project_path) - if not dlc_config: - dlc_config = read_config(config_path) - dlc_config.update((DLCModelTrainingParams & key).fetch1("params")) - dlc_config.update( - { - "project_path": Path(project_path).as_posix(), - "modelprefix": model_prefix, - "train_fraction": dlc_config["TrainingFraction"][ - int(dlc_config["trainingsetindex"]) - ], - "training_filelist_datajoint": [ # don't overwrite origin video_sets - Path(fp).as_posix() - for fp in (DLCProject.File & key).fetch("file_path") - ], - } - ) - # Write 
dlc config file to base project folder - # TODO: need to make sure this will work - dlc_cfg_filepath = dlc_reader.save_yaml(project_path, dlc_config) - # ---- create training dataset ---- - training_dataset_input_args = list( - inspect.signature(create_training_dataset).parameters - ) - training_dataset_kwargs = { - k: v - for k, v in dlc_config.items() - if k in training_dataset_input_args + + dlc_config = read_config(config_path) + project_path = dlc_config["project_path"] + key["project_path"] = project_path + + # ---- Build and save DLC configuration (yaml) file ---- + dlc_config = dlc_reader.read_yaml(project_path)[1] or read_config( + config_path + ) + dlc_config.update( + { + **(DLCModelTrainingParams & key).fetch1("params"), + "project_path": Path(project_path).as_posix(), + "modelprefix": model_prefix, + "train_fraction": dlc_config["TrainingFraction"][ + int(dlc_config.get("trainingsetindex", 0)) + ], + "training_filelist_datajoint": [ # don't overwrite origin video_sets + Path(fp).as_posix() + for fp in (DLCProject.File & key).fetch("file_path") + ], } - logger.logger.info("creating training dataset") - # err here - create_training_dataset(dlc_cfg_filepath, **training_dataset_kwargs) - # ---- Trigger DLC model training job ---- - train_network_input_args = list( - inspect.signature(train_network).parameters + ) + + # Write dlc config file to base project folder + dlc_cfg_filepath = dlc_reader.save_yaml(project_path, dlc_config) + # ---- create training dataset ---- + training_dataset_input_args = list( + inspect.signature(create_training_dataset).parameters + ) + training_dataset_kwargs = { + k: v + for k, v in dlc_config.items() + if k in training_dataset_input_args + } + logger.info("creating training dataset") + create_training_dataset(dlc_cfg_filepath, **training_dataset_kwargs) + # ---- Trigger DLC model training job ---- + train_network_kwargs = { + k: v + for k, v in dlc_config.items() + if k in get_param_names(train_network) + } + for k in ["shuffle", "trainingsetindex", "maxiters"]: + if value := train_network_kwargs.get(k): + train_network_kwargs[k] = int(value) + if test_mode: + train_network_kwargs["maxiters"] = 2 + + try: + train_network(dlc_cfg_filepath, **train_network_kwargs) + except KeyboardInterrupt: + logger.info("DLC training stopped via Keyboard Interrupt") + + snapshots = ( + project_path + / get_model_folder( + trainFraction=dlc_config["train_fraction"], + shuffle=dlc_config["shuffle"], + cfg=dlc_config, + modelprefix=dlc_config["modelprefix"], ) - train_network_kwargs = { - k: v - for k, v in dlc_config.items() - if k in train_network_input_args + / "train" + ).glob("*index*") + + # DLC goes by snapshot magnitude when judging 'latest' for + # evaluation. 
Here, we mean most recently generated + max_modified_time = 0 + for snapshot in snapshots: + modified_time = os.path.getmtime(snapshot) + if modified_time > max_modified_time: + latest_snapshot = int(snapshot.stem[9:]) + max_modified_time = modified_time + + self.insert1( + { + **key, + "latest_snapshot": latest_snapshot, + "config_template": dlc_config, } - for k in ["shuffle", "trainingsetindex", "maxiters"]: - if k in train_network_kwargs: - train_network_kwargs[k] = int(train_network_kwargs[k]) - if test_mode: - train_network_kwargs["maxiters"] = 2 - try: - train_network(dlc_cfg_filepath, **train_network_kwargs) - except ( - KeyboardInterrupt - ): # Instructions indicate to train until interrupt - logger.logger.info( - "DLC training stopped via Keyboard Interrupt" - ) - - snapshots = list( - ( - project_path - / get_model_folder( - trainFraction=dlc_config["train_fraction"], - shuffle=dlc_config["shuffle"], - cfg=dlc_config, - modelprefix=dlc_config["modelprefix"], - ) - / "train" - ).glob("*index*") - ) - max_modified_time = 0 - # DLC goes by snapshot magnitude when judging 'latest' for evaluation - # Here, we mean most recently generated - for snapshot in snapshots: - modified_time = os.path.getmtime(snapshot) - if modified_time > max_modified_time: - latest_snapshot = int(snapshot.stem[9:]) - max_modified_time = modified_time - - self.insert1( - { - **key, - "latest_snapshot": latest_snapshot, - "config_template": dlc_config, - } - ) - from .position_dlc_model import DLCModelSource - - dlc_model_name = f"{key['project_name']}_{key['dlc_training_params_name']}_{key['training_id']:02d}" - DLCModelSource.insert_entry( - dlc_model_name=dlc_model_name, - project_name=key["project_name"], - source="FromUpstream", - key=key, - skip_duplicates=True, - ) - print( - f"Inserted {dlc_model_name} from {key['project_name']} into DLCModelSource" ) + from .position_dlc_model import DLCModelSource + + dlc_model_name = ( + f"{key['project_name']}_" + + f"{key['dlc_training_params_name']}_{key['training_id']:02d}" + ) + DLCModelSource.insert_entry( + dlc_model_name=dlc_model_name, + project_name=key["project_name"], + source="FromUpstream", + key=key, + skip_duplicates=True, + ) + + +def get_param_names(func): + return list(inspect.signature(func).parameters) diff --git a/src/spyglass/position/v1/position_trodes_position.py b/src/spyglass/position/v1/position_trodes_position.py index 86487ad23..501407571 100644 --- a/src/spyglass/position/v1/position_trodes_position.py +++ b/src/spyglass/position/v1/position_trodes_position.py @@ -1,16 +1,15 @@ import copy import os -from pathlib import Path import datajoint as dj import numpy as np from datajoint.utils import to_camel_case -from tqdm import tqdm as tqdm from spyglass.common.common_behav import RawPosition from spyglass.common.common_nwbfile import AnalysisNwbfile -from spyglass.common.common_position import IntervalPositionInfo -from spyglass.position.v1.dlc_utils import check_videofile, get_video_path +from spyglass.common.common_position import IntervalPositionInfo, _fix_col_names +from spyglass.position.v1.dlc_utils import find_mp4, get_video_info +from spyglass.position.v1.dlc_utils_makevid import make_video from spyglass.settings import test_mode from spyglass.utils import SpyglassMixin, logger @@ -250,14 +249,14 @@ def make(self, key): M_TO_CM = 100 logger.info("Loading position data...") - raw_position_df = ( + raw_df = ( RawPosition.PosObject & { "nwb_file_name": key["nwb_file_name"], "interval_list_name": key["interval_list_name"], } 
).fetch1_dataframe() - position_info_df = (TrodesPosV1() & key).fetch1_dataframe() + pos_df = (TrodesPosV1() & key).fetch1_dataframe() logger.info("Loading video data...") epoch = ( @@ -274,7 +273,7 @@ def make(self, key): video_filename, meters_per_pixel, video_time, - ) = get_video_path( + ) = get_video_info( {"nwb_file_name": key["nwb_file_name"], "epoch": epoch} ) @@ -282,222 +281,39 @@ def make(self, key): self.insert1(dict(**key, has_video=False)) return - video_dir = os.path.dirname(video_path) + "/" - video_path = check_videofile( - video_path=video_dir, video_filename=video_filename - )[0].as_posix() - nwb_base_filename = key["nwb_file_name"].replace(".nwb", "") - current_dir = Path(os.getcwd()) - output_video_filename = ( - f"{current_dir.as_posix()}/{nwb_base_filename}_" - f"{epoch:02d}_{key['trodes_pos_params_name']}.mp4" - ) - red_cols = ( - ["xloc", "yloc"] - if "xloc" in raw_position_df.columns - else ["xloc1", "yloc1"] - ) - centroids = { - "red": np.asarray(raw_position_df[red_cols]), - "green": np.asarray(raw_position_df[["xloc2", "yloc2"]]), - } - position_mean = np.asarray( - position_info_df[["position_x", "position_y"]] + video_path = find_mp4( + video_path=os.path.dirname(video_path) + "/", + video_filename=video_filename, ) - orientation_mean = np.asarray(position_info_df[["orientation"]]) - position_time = np.asarray(position_info_df.index) - cm_per_pixel = meters_per_pixel * M_TO_CM - logger.info("Making video...") - self.make_video( - video_path, - centroids, - position_mean, - orientation_mean, - video_time, - position_time, - output_video_filename=output_video_filename, - cm_to_pixels=cm_per_pixel, - disable_progressbar=False, + output_video_filename = ( + key["nwb_file_name"].replace(".nwb", "") + + f"_{epoch:02d}_" + + f'{key["trodes_pos_params_name"]}.mp4' ) - self.insert1(dict(**key, has_video=True)) - - @staticmethod - def convert_to_pixels(data, frame_size, cm_to_pixels=1.0): - """Converts from cm to pixels and flips the y-axis. - Parameters - ---------- - data : ndarray, shape (n_time, 2) - frame_size : array_like, shape (2,) - cm_to_pixels : float - Returns - ------- - converted_data : ndarray, shape (n_time, 2) - """ - return data / cm_to_pixels + adj_df = _fix_col_names(raw_df) # adjust 'xloc1' to 'xloc' - @staticmethod - def fill_nan(variable, video_time, variable_time, truncate_data=False): - """Fill in missing values in variable with nans at video_time. - - Parameters - ---------- - variable : ndarray, shape (n_time,) or (n_time, n_dims) - The variable to fill in. - video_time : ndarray, shape (n_video_time,) - The time points of the video. - variable_time : ndarray, shape (n_variable_time,) - The time points of the variable. 
- """ - # TODO: Reduce duplication across dlc_utils and common_position - - video_ind = np.digitize(variable_time, video_time[1:]) - n_video_time = len(video_time) - - try: - n_variable_dims = variable.shape[1] - filled_variable = np.full((n_video_time, n_variable_dims), np.nan) - except IndexError: - filled_variable = np.full((n_video_time,), np.nan) - - filled_variable[video_ind] = variable - - return filled_variable - - def make_video( - self, - video_filename, - centroids, - position_mean, - orientation_mean, - video_time, - position_time, - output_video_filename="output.mp4", - cm_to_pixels=1.0, - disable_progressbar=False, - arrow_radius=15, - circle_radius=8, - truncate_data=False, # reduce data to min length across all variables - ): - import cv2 - - RGB_PINK = (234, 82, 111) - RGB_YELLOW = (253, 231, 76) - RGB_WHITE = (255, 255, 255) - - video = cv2.VideoCapture(video_filename) - fourcc = cv2.VideoWriter_fourcc(*"mp4v") - frame_size = (int(video.get(3)), int(video.get(4))) - frame_rate = video.get(5) - n_frames = int(orientation_mean.shape[0]) - logger.info(f"video filepath: {output_video_filename}") - out = cv2.VideoWriter( - output_video_filename, fourcc, frame_rate, frame_size, True - ) - - if test_mode or truncate_data: + if test_mode: # pytest video data has mismatched shapes in some cases - # centroid (267, 2), video_time (270, 2), position_time (5193,) - min_len = min( - n_frames, - len(video_time), - len(position_time), - len(position_mean), - len(orientation_mean), - min(len(v) for v in centroids.values()), - ) - n_frames = min_len + min_len = min(len(adj_df), len(pos_df), len(video_time)) + adj_df = adj_df[:min_len] + pos_df = pos_df[:min_len] video_time = video_time[:min_len] - position_time = position_time[:min_len] - position_mean = position_mean[:min_len] - orientation_mean = orientation_mean[:min_len] - for color, data in centroids.items(): - centroids[color] = data[:min_len] - - centroids = { - color: self.fill_nan( - variable=data, - video_time=video_time, - variable_time=position_time, - ) - for color, data in centroids.items() - } - position_mean = self.fill_nan(position_mean, video_time, position_time) - orientation_mean = self.fill_nan( - orientation_mean, video_time, position_time - ) - for time_ind in tqdm( - range(n_frames - 1), desc="frames", disable=disable_progressbar - ): - is_grabbed, frame = video.read() - if is_grabbed: - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - red_centroid = centroids["red"][time_ind] - green_centroid = centroids["green"][time_ind] - - position = position_mean[time_ind] - position = self.convert_to_pixels( - position, frame_size, cm_to_pixels - ) - orientation = orientation_mean[time_ind] - - if np.all(~np.isnan(red_centroid)): - cv2.circle( - img=frame, - center=tuple(red_centroid.astype(int)), - radius=circle_radius, - color=RGB_YELLOW, - thickness=-1, - shift=cv2.CV_8U, - ) - - if np.all(~np.isnan(green_centroid)): - cv2.circle( - img=frame, - center=tuple(green_centroid.astype(int)), - radius=circle_radius, - color=RGB_PINK, - thickness=-1, - shift=cv2.CV_8U, - ) - - if np.all(~np.isnan(position)) & np.all(~np.isnan(orientation)): - arrow_tip = ( - int(position[0] + arrow_radius * np.cos(orientation)), - int(position[1] + arrow_radius * np.sin(orientation)), - ) - cv2.arrowedLine( - img=frame, - pt1=tuple(position.astype(int)), - pt2=arrow_tip, - color=RGB_WHITE, - thickness=4, - line_type=8, - shift=cv2.CV_8U, - tipLength=0.25, - ) - - if np.all(~np.isnan(position)): - cv2.circle( - img=frame, - 
center=tuple(position.astype(int)), - radius=circle_radius, - color=RGB_WHITE, - thickness=-1, - shift=cv2.CV_8U, - ) - - frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) - out.write(frame) - else: - break - - video.release() - out.release() - try: - cv2.destroyAllWindows() - except cv2.error: # if cv is already closed or does not have func - pass + make_video( + processor="opencv-trodes", + video_filename=video_path, + centroids={ + "red": np.asarray(adj_df[["xloc", "yloc"]]), + "green": np.asarray(adj_df[["xloc2", "yloc2"]]), + }, + position_mean=np.asarray(pos_df[["position_x", "position_y"]]), + orientation_mean=np.asarray(pos_df[["orientation"]]), + video_time=video_time, + position_time=np.asarray(pos_df.index), + output_video_filename=output_video_filename, + cm_to_pixels=meters_per_pixel * M_TO_CM, + disable_progressbar=False, + ) + self.insert1(dict(**key, has_video=True)) diff --git a/src/spyglass/utils/dj_helper_fn.py b/src/spyglass/utils/dj_helper_fn.py index 85ff1922a..f42c9858a 100644 --- a/src/spyglass/utils/dj_helper_fn.py +++ b/src/spyglass/utils/dj_helper_fn.py @@ -142,7 +142,7 @@ def dj_replace(original_table, new_values, key_column, replace_column): return original_table -def get_fetching_table_from_stack(stack): +def get_all_tables_in_stack(stack): """Get all classes from a stack of tables.""" classes = set() for frame_info in stack: @@ -153,11 +153,13 @@ def get_fetching_table_from_stack(stack): if (name := obj.full_table_name) in PERIPHERAL_TABLES: continue # skip common_nwbfile tables classes.add(name) + return classes + + +def get_fetching_table_from_stack(stack): + """Get all classes from a stack of tables.""" + classes = get_all_tables_in_stack(stack) if len(classes) > 1: - logger.warn( - f"Multiple classes found in stack: {classes}. " - "Please submit a bug report with the snippet used." - ) classes = None # predict only one but not sure, so return None return next(iter(classes)) if classes else None @@ -262,7 +264,11 @@ def fetch_nwb(query_expression, nwb_master, *attrs, **kwargs): # skip the filepath checksum if streamed from Dandi rec_dict["nwb2load_filepath"] = file_path continue - rec_dict["nwb2load_filepath"] = (query_table & rec_dict).fetch1( + + # Pulled from future cbroz1/ndo + # Full dict caused issues with dlc tables using dicts in secondary keys + rec_only_pk = {k: rec_dict[k] for k in query_table.heading.primary_key} + rec_dict["nwb2load_filepath"] = (query_table & rec_only_pk).fetch1( "nwb2load_filepath" ) @@ -332,7 +338,7 @@ def update_analysis_for_dandi_standard( # edit the file with h5py.File(filepath, "a") as file: sex_value = file["/general/subject/sex"][()].decode("utf-8") - if not sex_value in ["Female", "Male", "F", "M", "O", "U"]: + if sex_value not in ["Female", "Male", "F", "M", "O", "U"]: raise ValueError(f"Unexpected value for sex: {sex_value}") if len(sex_value) > 1: @@ -347,7 +353,8 @@ def update_analysis_for_dandi_standard( if species_value == "Rat": new_species_value = "Rattus norvegicus" print( - f"Adjusting subject species from '{species_value}' to '{new_species_value}'." + f"Adjusting subject species from '{species_value}' to " + + f"'{new_species_value}'." 
)
         file["/general/subject/species"][()] = new_species_value

@@ -355,8 +362,10 @@
             len(species_value.split(" ")) == 2 or "NCBITaxon" in species_value
         ):
             raise ValueError(
-                f"Dandi upload requires species either be in Latin binomial form (e.g., 'Mus musculus' and 'Homo sapiens')"
-                + "or be a NCBI taxonomy link (e.g., 'http://purl.obolibrary.org/obo/NCBITaxon_280675')."
+                "Dandi upload requires species either be in Latin binomial form"
+                + " (e.g., 'Mus musculus' and 'Homo sapiens')"
+                + " or be a NCBI taxonomy link (e.g., "
+                + "'http://purl.obolibrary.org/obo/NCBITaxon_280675')."
                 + f"\n Please update species value of: {species_value}"
             )

diff --git a/src/spyglass/utils/dj_merge_tables.py b/src/spyglass/utils/dj_merge_tables.py
index 37a51b674..d1176de30 100644
--- a/src/spyglass/utils/dj_merge_tables.py
+++ b/src/spyglass/utils/dj_merge_tables.py
@@ -822,13 +822,14 @@ def delete_downstream_merge(

     Passthrough to SpyglassMixin.delete_downstream_merge
     """
-    logger.warning(
-        "DEPRECATED: This function will be removed in `0.6`. "
-        + "Use AnyTable().delete_downstream_merge() instead."
-    )
+
+    from spyglass.common.common_usage import ActivityLog
     from spyglass.utils.dj_mixin import SpyglassMixin

+    ActivityLog().deprecate_log(
+        "delete_downstream_merge. Use Table.delete_downstream_merge"
+    )
+
     if not isinstance(table, SpyglassMixin):
         raise ValueError("Input must be a Spyglass Table.")
     table = table if isinstance(table, dj.Table) else table()
diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py
index 51f398436..4cdbbbaa0 100644
--- a/src/spyglass/utils/dj_mixin.py
+++ b/src/spyglass/utils/dj_mixin.py
@@ -1,4 +1,3 @@
-import multiprocessing.pool
 from atexit import register as exit_register
 from atexit import unregister as exit_unregister
 from collections import OrderedDict
@@ -129,6 +128,18 @@ def file_like(self, name=None, **kwargs):
             return
         return self & f"{attr} LIKE '%{name}%'"

+    def find_insert_fail(self, key):
+        """Find which parent table is causing an IntegrityError on insert."""
+        for parent in self.parents(as_objects=True):
+            parent_key = {
+                k: v for k, v in key.items() if k in parent.heading.names
+            }
+            parent_name = to_camel_case(parent.table_name)
+            if query := parent & parent_key:
+                logger.info(f"{parent_name}:\n{query}")
+            else:
+                logger.info(f"{parent_name}: MISSING")
+
     @classmethod
     def _safe_context(cls):
         """Return transaction if not already in one."""
diff --git a/src/spyglass/utils/nwb_helper_fn.py b/src/spyglass/utils/nwb_helper_fn.py
index d5b6e4624..641b3f2da 100644
--- a/src/spyglass/utils/nwb_helper_fn.py
+++ b/src/spyglass/utils/nwb_helper_fn.py
@@ -101,7 +101,7 @@ def file_from_dandi(filepath):
     return False


-def get_config(nwb_file_path):
+def get_config(nwb_file_path, calling_table=None):
    """Return a dictionary of config settings for the given NWB file.
 
     If the file does not exist, return an empty dict.
@@ -122,8 +122,14 @@ def get_config(nwb_file_path): # NOTE use p.stem[:-1] to remove the underscore that was added to the file config_path = p.parent / (p.stem[:-1] + "_spyglass_config.yaml") if not os.path.exists(config_path): - logger.info(f"No config found at file path {config_path}") - return dict() + from spyglass.settings import base_dir # noqa: F401 + + rel_path = p.relative_to(base_dir) + table = f"{calling_table}: " if calling_table else "" + logger.info(f"{table}No config found at {rel_path}") + ret = dict() + __configs[nwb_file_path] = ret # cache to avoid repeated null lookups + return ret with open(config_path, "r") as stream: d = yaml.safe_load(stream) diff --git a/tests/common/test_behav.py b/tests/common/test_behav.py index 6f2daa690..bcfd50270 100644 --- a/tests/common/test_behav.py +++ b/tests/common/test_behav.py @@ -22,18 +22,18 @@ def test_valid_epoch_num(common): assert epoch_num == 1, "PositionSource get_epoch_num failed" -def test_possource_make(common): +def test_pos_source_make(common): """Test custom populate""" common.PositionSource().make(common.Session()) -def test_possource_make_invalid(common): +def test_pos_source_make_invalid(common): """Test invalid populate""" with pytest.raises(ValueError): common.PositionSource().make(dict()) -def test_raw_position_fetchnwb(common, mini_pos, mini_pos_interval_dict): +def test_raw_position_fetch_nwb(common, mini_pos, mini_pos_interval_dict): """Test RawPosition fetch nwb""" fetched = DataFrame( (common.RawPosition & mini_pos_interval_dict) @@ -56,7 +56,7 @@ def test_raw_position_fetch1_df(common, mini_pos, mini_pos_interval_dict): assert fetched.equals(raw), "RawPosition fetch1_dataframe failed" -def test_raw_position_fetch_mult_df(common, mini_pos, mini_pos_interval_dict): +def test_raw_position_fetch_multi_df(common, mini_pos, mini_pos_interval_dict): """Test RawPosition fetch1 dataframe""" shape = common.RawPosition().fetch1_dataframe().shape assert shape == (542, 8), "RawPosition.PosObj fetch1_dataframe failed" @@ -94,7 +94,7 @@ def test_videofile_getabspath(common, video_keys): @pytest.mark.skipif(not TEARDOWN, reason="No teardown: expect no change.") -def test_posinterval_no_transaction(verbose_context, common, mini_restr): +def test_pos_interval_no_transaction(verbose_context, common, mini_restr): """Test no transaction""" before = common.PositionIntervalMap().fetch() with verbose_context: diff --git a/tests/common/test_device.py b/tests/common/test_device.py index 19103cf98..abfe60863 100644 --- a/tests/common/test_device.py +++ b/tests/common/test_device.py @@ -16,7 +16,7 @@ def test_get_device(common, mini_content): assert len(dev) == 3, "Unexpected number of devices found" -def test_spikegadets_system_alias(mini_insert, common): +def test_spike_gadgets_system_alias(mini_insert, common): assert ( common.DataAcquisitionDevice()._add_system("MCU") == "SpikeGadgets" ), "SpikeGadgets MCU alias not found" diff --git a/tests/common/test_ephys.py b/tests/common/test_ephys.py index 3887e00fc..37f298fdc 100644 --- a/tests/common/test_ephys.py +++ b/tests/common/test_ephys.py @@ -25,7 +25,7 @@ def test_electrode_populate(common_ephys): assert len(common_ephys.Electrode()) == 128, "Electrode.populate failed" -def test_egroup_populate(common_ephys): +def test_elec_group_populate(common_ephys): common_ephys.ElectrodeGroup.populate() assert ( len(common_ephys.ElectrodeGroup()) == 32 @@ -37,7 +37,7 @@ def test_raw_populate(common_ephys): assert len(common_ephys.Raw()) == 1, "Raw.populate failed" -def 
test_samplecount_populate(common_ephys): +def test_sample_count_populate(common_ephys): common_ephys.SampleCount.populate() assert len(common_ephys.SampleCount()) == 1, "SampleCount.populate failed" diff --git a/tests/common/test_insert.py b/tests/common/test_insert.py index 6d2fd18b3..f80967b4a 100644 --- a/tests/common/test_insert.py +++ b/tests/common/test_insert.py @@ -7,10 +7,10 @@ def test_insert_session(mini_insert, mini_content, mini_restr, common): subj_raw = mini_content.subject meta_raw = mini_content - sess_data = (common.Session & mini_restr).fetch1() + session_data = (common.Session & mini_restr).fetch1() assert ( - sess_data["subject_id"] == subj_raw.subject_id - ), "Subjuect ID not match" + session_data["subject_id"] == subj_raw.subject_id + ), "Subject ID not match" attrs = [ ("institution_name", "institution"), @@ -20,37 +20,37 @@ def test_insert_session(mini_insert, mini_content, mini_restr, common): ("experiment_description", "experiment_description"), ] - for sess_attr, meta_attr in attrs: - assert sess_data[sess_attr] == getattr( + for session_attr, meta_attr in attrs: + assert session_data[session_attr] == getattr( meta_raw, meta_attr - ), f"Session table {sess_attr} not match raw data {meta_attr}" + ), f"Session table {session_attr} not match raw data {meta_attr}" time_attrs = [ ("session_start_time", "session_start_time"), ("timestamps_reference_time", "timestamps_reference_time"), ] - for sess_attr, meta_attr in time_attrs: + for session_attr, meta_attr in time_attrs: # a. strip timezone info from meta_raw # b. convert to timestamp # c. compare precision to 1 second - assert sess_data[sess_attr].timestamp() == approx( + assert session_data[session_attr].timestamp() == approx( getattr(meta_raw, meta_attr).replace(tzinfo=None).timestamp(), abs=1 - ), f"Session table {sess_attr} not match raw data {meta_attr}" + ), f"Session table {session_attr} not match raw data {meta_attr}" def test_insert_electrode_group(mini_insert, mini_content, common): group_name = "0" - egroup_data = ( + elec_group_data = ( common.ElectrodeGroup & {"electrode_group_name": group_name} ).fetch1() - egroup_raw = mini_content.electrode_groups.get(group_name) + elec_group_raw = mini_content.electrode_groups.get(group_name) assert ( - egroup_data["description"] == egroup_raw.description + elec_group_data["description"] == elec_group_raw.description ), "ElectrodeGroup description not match" - assert egroup_data["region_id"] == ( - common.BrainRegion & {"region_name": egroup_raw.location} + assert elec_group_data["region_id"] == ( + common.BrainRegion & {"region_name": elec_group_raw.location} ).fetch1( "region_id" ), "Region ID does not match across raw data and BrainRegion table" @@ -138,7 +138,7 @@ def test_insert_pos( assert data_obj_id == raw_obj_id, "PosObject insertion error" -def test_fetch_posobj( +def test_fetch_pos_obj( mini_insert, common, mini_pos, mini_pos_series, mini_pos_tbl ): pos_key = ( diff --git a/tests/common/test_interval.py b/tests/common/test_interval.py index 8353961f8..e720b4466 100644 --- a/tests/common/test_interval.py +++ b/tests/common/test_interval.py @@ -23,5 +23,5 @@ def test_plot_epoch(mini_insert, interval_list): epoch_label = fig.get_axes()[0].get_yticklabels()[-1].get_text() assert epoch_label == "epoch", "plot_epoch failed" - epoch_interv = fig.get_axes()[0].lines[0].get_ydata() - assert array_equal(epoch_interv, [1, 1]), "plot_epoch failed" + epoch_interval = fig.get_axes()[0].lines[0].get_ydata() + assert array_equal(epoch_interval, [1, 1]), "plot_epoch 
failed" diff --git a/tests/common/test_interval_helpers.py b/tests/common/test_interval_helpers.py index d4e7eb1ac..3ef505f57 100644 --- a/tests/common/test_interval_helpers.py +++ b/tests/common/test_interval_helpers.py @@ -111,7 +111,7 @@ def test_interval_list_contains_ind(common, interval_list_dict): ), "Problem with common_interval.interval_list_contains_ind" -def test_insterval_list_contains(common, interval_list_dict): +def test_interval_list_contains(common, interval_list_dict): idxs = common.common_interval.interval_list_contains(**interval_list_dict) assert np.array_equal( idxs, np.array([1, 7, 8]) diff --git a/tests/common/test_lab.py b/tests/common/test_lab.py index 83ab84c10..0133c2bfe 100644 --- a/tests/common/test_lab.py +++ b/tests/common/test_lab.py @@ -71,7 +71,7 @@ def add_member_team(common_lab, add_admin): yield -def test_labmember_insert_file_str(mini_insert, common_lab, mini_copy_name): +def test_lab_member_insert_file_str(mini_insert, common_lab, mini_copy_name): before = common_lab.LabMember.fetch() common_lab.LabMember.insert_from_nwbfile(mini_copy_name) after = common_lab.LabMember.fetch() diff --git a/tests/common/test_region.py b/tests/common/test_region.py index 8241cb304..9a89ede03 100644 --- a/tests/common/test_region.py +++ b/tests/common/test_region.py @@ -29,4 +29,4 @@ def test_region_add(brain_region, region_dict): ) assert ( region_id == next_id - ), "Region.fetch_add() should autincrement region_id." + ), "Region.fetch_add() should autoincrement region_id." diff --git a/tests/common/test_session.py b/tests/common/test_session.py index 6e0a8f0ce..2276f23bd 100644 --- a/tests/common/test_session.py +++ b/tests/common/test_session.py @@ -1,5 +1,4 @@ import pytest -from datajoint.errors import DataJointError @pytest.fixture @@ -46,7 +45,7 @@ def add_session_to_group(session_group, mini_copy_name, group_name_dict): ) -def test_addremove_session_group( +def test_add_remove_session_group( common_session, session_group, session_group_dict, diff --git a/tests/conftest.py b/tests/conftest.py index fe8ce1a5b..8a58df39b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,7 +11,6 @@ from contextlib import nullcontext from pathlib import Path from shutil import rmtree as shutil_rmtree -from time import sleep as tsleep import datajoint as dj import numpy as np @@ -171,18 +170,18 @@ def server(request, teardown): @pytest.fixture(scope="session") -def server_creds(server): - yield server.creds +def server_credentials(server): + yield server.credentials @pytest.fixture(scope="session") -def dj_conn(request, server_creds, verbose, teardown): +def dj_conn(request, server_credentials, verbose, teardown): """Fixture for datajoint connection.""" config_file = "dj_local_conf.json_test" if Path(config_file).exists(): os.remove(config_file) - dj.config.update(server_creds) + dj.config.update(server_credentials) dj.config["loglevel"] = "INFO" if verbose else "ERROR" dj.config["custom"]["spyglass_dirs"] = {"base": str(BASE_DIR)} dj.config.save(config_file) @@ -210,34 +209,25 @@ def raw_dir(base_dir): @pytest.fixture(scope="session") def mini_path(raw_dir): path = raw_dir / TEST_FILE + DOWNLOADS.wait_for(TEST_FILE) # wait for wget download to finish - # wait for wget download to finish - if (nwb_download := DOWNLOADS.file_downloads.get(TEST_FILE)) is not None: - nwb_download.wait() - - # wait for download to finish - timeout, wait, found = 60, 5, False - for _ in range(timeout // wait): - if path.exists(): - found = True - break - tsleep(wait) - - if not found: + if not 
path.exists(): raise ConnectionError("Download failed.") yield path @pytest.fixture(scope="session") -def nodlc(request): +def no_dlc(request): yield NO_DLC @pytest.fixture(scope="session") -def skipif_nodlc(request): +def skipif_no_dlc(request): if NO_DLC: yield pytest.mark.skip(reason="Skipping DLC-dependent tests.") + else: + yield @pytest.fixture(scope="session") @@ -293,11 +283,9 @@ def mini_insert( _ = SpikeSortingOutput() - LabMember().insert1( - ["Root User", "Root", "User"], skip_duplicates=not teardown - ) + LabMember().insert1(["Root User", "Root", "User"], skip_duplicates=True) LabMember.LabMemberInfo().insert1( - ["Root User", "email", "root", 1], skip_duplicates=not teardown + ["Root User", "email", "root", 1], skip_duplicates=True ) dj_logger.info("Inserting test data.") @@ -395,14 +383,40 @@ def populate_exception(): yield PopulateException +@pytest.fixture(scope="session") +def frequent_imports(): + """Often needed for graph cascade.""" + from spyglass.common.common_ripple import RippleLFPSelection + from spyglass.decoding.v0.clusterless import UnitMarksIndicatorSelection + from spyglass.decoding.v0.sorted_spikes import ( + SortedSpikesIndicatorSelection, + ) + from spyglass.decoding.v1.core import PositionGroup + from spyglass.lfp.analysis.v1 import LFPBandSelection + from spyglass.mua.v1.mua import MuaEventsV1 + from spyglass.ripple.v1.ripple import RippleTimesV1 + from spyglass.spikesorting.v0.figurl_views import SpikeSortingRecordingView + + return ( + LFPBandSelection, + MuaEventsV1, + PositionGroup, + RippleLFPSelection, + RippleTimesV1, + SortedSpikesIndicatorSelection, + SpikeSortingRecordingView, + UnitMarksIndicatorSelection, + ) + + # -------------------------- FIXTURES, COMMON TABLES -------------------------- @pytest.fixture(scope="session") def video_keys(common, base_dir): - for file, download in DOWNLOADS.file_downloads.items(): - if file.endswith(".h264") and download is not None: - download.wait() # wait for videos to finish downloading + for file in DOWNLOADS.file_downloads: + if file.endswith(".h264"): + DOWNLOADS.wait_for(file) DOWNLOADS.rename_files() return common.VideoFile().fetch(as_dict=True) @@ -807,6 +821,7 @@ def dlc_project_name(): def insert_project( verbose_context, teardown, + video_keys, # wait for video downloads dlc_project_name, dlc_project_tbl, common, @@ -818,18 +833,32 @@ def insert_project( from deeplabcut.utils.auxiliaryfunctions import read_config, write_config + from spyglass.decoding.v1.core import PositionGroup + from spyglass.linearization.merge import LinearizedPositionOutput + from spyglass.linearization.v1 import LinearizationSelection + from spyglass.mua.v1.mua import MuaEventsV1 + from spyglass.ripple.v1 import RippleTimesV1 + + _ = ( + PositionGroup, + LinearizedPositionOutput, + LinearizationSelection, + MuaEventsV1, + RippleTimesV1, + ) + team_name = "sc_eb" common.LabTeam.insert1({"team_name": team_name}, skip_duplicates=True) + video_list = common.VideoFile().fetch( + "nwb_file_name", "epoch", as_dict=True + )[:2] with verbose_context: project_key = dlc_project_tbl.insert_new_project( project_name=dlc_project_name, bodyparts=bodyparts, lab_team=team_name, frames_per_video=100, - video_list=[ - {"nwb_file_name": mini_copy_name, "epoch": 0}, - {"nwb_file_name": mini_copy_name, "epoch": 1}, - ], + video_list=video_list, skip_duplicates=True, ) config_path = (dlc_project_tbl & project_key).fetch1("config_path") @@ -904,23 +933,8 @@ def labeled_vid_dir(extract_frames): @pytest.fixture(scope="session") -def 
fix_downloaded(labeled_vid_dir, project_dir): - """Grabs CollectedData and img files from project_dir, moves to labeled""" - for file in project_dir.parent.parent.glob("*"): - if file.is_dir(): - continue - dest = labeled_vid_dir / file.name - if dest.exists(): - dest.unlink() - dest.write_bytes(file.read_bytes()) - # TODO: revert to rename before merge - # file.rename(labeled_vid_dir / file.name) - - yield - - -@pytest.fixture(scope="session") -def add_training_files(dlc_project_tbl, project_key, fix_downloaded): +def add_training_files(dlc_project_tbl, project_key, labeled_vid_dir): + DOWNLOADS.move_dlc_items(labeled_vid_dir) dlc_project_tbl.add_training_files(project_key, skip_duplicates=True) yield @@ -970,11 +984,13 @@ def model_train_key(sgp, project_key, training_params_key): @pytest.fixture(scope="session") -def populate_training(sgp, fix_downloaded, model_train_key, add_training_files): +def populate_training( + sgp, model_train_key, add_training_files, labeled_vid_dir +): train_tbl = sgp.v1.DLCModelTraining if len(train_tbl & model_train_key) == 0: _ = add_training_files - _ = fix_downloaded + DOWNLOADS.move_dlc_items(labeled_vid_dir) sgp.v1.DLCModelTraining.populate(model_train_key) yield model_train_key @@ -1004,7 +1020,7 @@ def populate_model(sgp, model_key): @pytest.fixture(scope="session") def pose_estimation_key(sgp, mini_copy_name, populate_model, model_key): - yield sgp.v1.DLCPoseEstimationSelection.insert_estimation_task( + yield sgp.v1.DLCPoseEstimationSelection().insert_estimation_task( { "nwb_file_name": mini_copy_name, "epoch": 1, @@ -1094,13 +1110,10 @@ def cohort_selection(sgp, si_key, si_params_name): @pytest.fixture(scope="session") -def cohort_key(sgp, cohort_selection): - yield cohort_selection.copy() - - -@pytest.fixture(scope="session") -def populate_cohort(sgp, cohort_selection, populate_si): - sgp.v1.DLCSmoothInterpCohort.populate(cohort_selection) +def cohort_key(sgp, cohort_selection, populate_si): + cohort_tbl = sgp.v1.DLCSmoothInterpCohort() + cohort_tbl.populate(cohort_selection) + yield cohort_tbl.fetch("KEY", as_dict=True)[0] @pytest.fixture(scope="session") @@ -1130,7 +1143,7 @@ def centroid_params(sgp): @pytest.fixture(scope="session") -def centroid_selection(sgp, cohort_key, populate_cohort, centroid_params): +def centroid_selection(sgp, cohort_key, centroid_params): centroid_key = cohort_key.copy() centroid_key = { key: val @@ -1194,7 +1207,10 @@ def populate_orient(sgp, orient_selection): @pytest.fixture(scope="session") -def dlc_selection(sgp, centroid_key, orient_key, populate_orient): +def dlc_selection( + sgp, centroid_key, orient_key, populate_orient, populate_centroid +): + _ = populate_orient, populate_centroid dlc_key = { key: val for key, val in centroid_key.items() diff --git a/tests/container.py b/tests/container.py index b9d77263e..1747d76b8 100644 --- a/tests/container.py +++ b/tests/container.py @@ -190,7 +190,7 @@ def add_user(self) -> int: return None @property - def creds(self): + def credentials(self): """Datajoint credentials for this container.""" return { "database.host": "localhost", @@ -204,7 +204,7 @@ def creds(self): @property def connected(self) -> bool: self.wait() - dj.config.update(self.creds) + dj.config.update(self.credentials) return dj.conn().is_connected def stop(self, remove=True) -> None: diff --git a/tests/data_downloader.py b/tests/data_downloader.py index 98a254eda..cb58e1c71 100644 --- a/tests/data_downloader.py +++ b/tests/data_downloader.py @@ -1,10 +1,14 @@ from functools import cached_property from os 
import environ as os_environ from pathlib import Path +from shutil import copy as shutil_copy from subprocess import DEVNULL, Popen from sys import stderr, stdout +from time import sleep as time_sleep from typing import Dict, Union +from datajoint import logger as dj_logger + UCSF_BOX_USER = os_environ.get("UCSF_BOX_USER") UCSF_BOX_TOKEN = os_environ.get("UCSF_BOX_TOKEN") BASE_URL = "ftps://ftp.box.com/trodes_to_nwb_test_data/" @@ -87,6 +91,7 @@ def __init__( self.cmd_kwargs = dict(stdout=stdout, stderr=stderr) self.base_dir = Path(base_dir).resolve() + self.download_dlc = download_dlc self.file_paths = file_paths if download_dlc else file_paths[:NON_DLC] self.base_dir.mkdir(exist_ok=True) @@ -94,7 +99,7 @@ def __init__( _ = self.file_downloads def rename_files(self): - """Redund, but allows rerun later in startup process of conftest.""" + """Redundant, but allows rerun later in startup process of conftest.""" for path in self.file_paths: target, url = path["target_name"], path["url"] target_dir = self.base_dir / path["relative_dir"] @@ -112,28 +117,42 @@ def file_downloads(self) -> Dict[str, Union[Popen, None]]: for path in self.file_paths: target, url = path["target_name"], path["url"] target_dir = self.base_dir / path["relative_dir"] + target_dir.mkdir(exist_ok=True, parents=True) dest = target_dir / target + cmd = ( + ["echo", f"Already have {target}"] + if dest.exists() + else self.cmd + [target_dir, url] + ) + ret[target] = Popen(cmd, **self.cmd_kwargs) + return ret - if dest.exists(): - ret[target] = None - continue + def wait_for(self, target: str): + """Wait for target to finish downloading.""" + status = self.file_downloads.get(target).poll() + limit = 10 + while status is None and limit > 0: + time_sleep(5) # Some + limit -= 1 + status = self.file_downloads.get(target).poll() + if status != 0: + raise ValueError(f"Error downloading: {target}") + if limit < 1: + raise TimeoutError(f"Timeout downloading: {target}") - target_dir.mkdir(exist_ok=True, parents=True) - ret[target] = Popen(self.cmd + [target_dir, url], **self.cmd_kwargs) - return ret + def move_dlc_items(self, dest_dir: Path): + """Move completed DLC files to dest_dir.""" + if not self.download_dlc: + return + if not dest_dir.exists(): + dest_dir.mkdir(parents=True) - def check_download(self, download, info): - if download is not None: - download.wait() - if download.returncode: - return download - return None - - @property - def download_errors(self): - ret = [] - for download, item in zip(self.file_downloads, self.file_paths): - if d_status := self.check_download(download, item): - ret.append(d_status) - continue - return ret + for path in self.file_paths[NON_DLC:]: + target = path["target_name"] + self.wait_for(target) # Could be faster if moved finished first + + src_path = self.base_dir / path["relative_dir"] / target + dest_path = dest_dir / src_path.name + if not dest_path.exists(): + shutil_copy(str(src_path), str(dest_path)) + dj_logger.info(f"Moved: {src_path} -> {dest_path}") diff --git a/tests/lfp/test_lfp.py b/tests/lfp/test_lfp.py index b496ae445..b85bcc3bf 100644 --- a/tests/lfp/test_lfp.py +++ b/tests/lfp/test_lfp.py @@ -25,13 +25,13 @@ def test_lfp_dataframe(lfp, lfp_raw, lfp_merge_key): def test_lfp_band_dataframe(lfp_band_analysis_raw, lfp_band, lfp_band_key): - lfpb_raw = ( + lfp_band_raw = ( lfp_band_analysis_raw.processing["ecephys"] .fields["data_interfaces"]["LFP"] .electrical_series["filtered data"] ) - lfpb_index = Index(lfpb_raw.timestamps, name="time") - df_raw = DataFrame(lfpb_raw.data, 
index=lfpb_index) + lfp_band_index = Index(lfp_band_raw.timestamps, name="time") + df_raw = DataFrame(lfp_band_raw.data, index=lfp_band_index) df_fetch = (lfp_band.LFPBandV1 & lfp_band_key).fetch1_dataframe() assert df_raw.equals(df_fetch), "LFPBand dataframe not match." @@ -91,7 +91,7 @@ def test_invalid_band_selection( set_elec(**valid | {"reference_electrode_list": [3]}) -def test_artifactparam_defaults(art_params, art_param_defaults): +def test_artifact_param_defaults(art_params, art_param_defaults): assert set(art_params.fetch("artifact_params_name")).issubset( set(art_param_defaults) ), "LFPArtifactDetectionParameters missing default item." diff --git a/tests/position/conftest.py b/tests/position/conftest.py index c6c58d199..8f9e90795 100644 --- a/tests/position/conftest.py +++ b/tests/position/conftest.py @@ -30,6 +30,7 @@ def dlc_video_params(sgp): "params": { "percent_frames": 0.05, "incl_likelihood": True, + "processor": "opencv", }, }, skip_duplicates=True, diff --git a/tests/position/test_dlc_cent.py b/tests/position/test_dlc_cent.py index a3675b2ae..7980a2b30 100644 --- a/tests/position/test_dlc_cent.py +++ b/tests/position/test_dlc_cent.py @@ -47,17 +47,34 @@ def test_validate_params(params_tbl): @pytest.mark.parametrize( - "key", ["four_led_centroid", "two_pt_centroid", "one_pt_centroid"] + "key", ["one_pt_centroid", "two_pt_centroid", "four_led_centroid"] ) def test_centroid_calcs(key, sgp): + from spyglass.position.v1.dlc_utils import Centroid + points = sgp.v1.position_dlc_centroid._key_to_points[key] - func = sgp.v1.position_dlc_centroid._key_to_func_dict[key] df = generate_led_df(points) - ret = func(df, max_LED_separation=100, points={p: p for p in points}) + ret = Centroid( + df, max_LED_separation=100, points={p: p for p in points} + ).centroid assert np.all(ret[:-1] == 1), f"Centroid calculation failed for {key}" assert np.all(np.isnan(ret[-1])), f"Centroid calculation failed for {key}" - with pytest.raises(KeyError): - func(df) # Missing led separation/point names + +def test_centroid_error(sgp): + from spyglass.position.v1.dlc_utils import Centroid + + one_pt = {"point1": "point1"} + df = generate_led_df(one_pt) + Centroid(df, points=one_pt) # no sep ok on one point + + two_pt = {"point1": "point1", "point2": "point2"} + with pytest.raises(ValueError): + Centroid(df, points=two_pt) # Missing led separation for valid points + + three_pt = {"point1": "point1", "point2": "point2", "point3": "point3"} + three_pt_df = generate_led_df(three_pt) + with pytest.raises(ValueError): # invalid point number + Centroid(three_pt_df, points=three_pt, max_LED_separation=100) diff --git a/tests/position/test_dlc_model.py b/tests/position/test_dlc_model.py index 6f1ccf89d..036f98cdf 100644 --- a/tests/position/test_dlc_model.py +++ b/tests/position/test_dlc_model.py @@ -14,5 +14,5 @@ def test_model_params_default(sgp): def test_model_input_assert(sgp): - with pytest.raises(AssertionError): + with pytest.raises(FileNotFoundError): sgp.v1.DLCModelInput().insert1({"config_path": "/fake/path/"}) diff --git a/tests/position/test_dlc_pos_est.py b/tests/position/test_dlc_pos_est.py index fdf055843..f66f06616 100644 --- a/tests/position/test_dlc_pos_est.py +++ b/tests/position/test_dlc_pos_est.py @@ -6,9 +6,9 @@ def pos_est_sel(sgp): yield sgp.v1.position_dlc_pose_estimation.DLCPoseEstimationSelection() -@pytest.mark.usefixtures("skipif_nodlc") +@pytest.mark.usefixtures("skipif_no_dlc") def test_rename_non_default_columns(sgp, common, pos_est_sel, video_keys): - vid_path, vid_name, _, _ = 
sgp.v1.dlc_utils.get_video_path(video_keys[0]) + vid_path, vid_name, _, _ = sgp.v1.dlc_utils.get_video_info(video_keys[0]) input = "0, 10, 0, 1000" output = pos_est_sel.get_video_crop(vid_path + vid_name, input) diff --git a/tests/position/test_dlc_sel.py b/tests/position/test_dlc_sel.py index 35b33fe06..b0cd3340b 100644 --- a/tests/position/test_dlc_sel.py +++ b/tests/position/test_dlc_sel.py @@ -1,4 +1,4 @@ -def test_dlcvideo_default(sgp): +def test_dlc_video_default(sgp): expected_default = { "dlc_pos_video_params_name": "default", "params": { diff --git a/tests/position/test_dlc_train.py b/tests/position/test_dlc_train.py index eefa26f66..acd4d701d 100644 --- a/tests/position/test_dlc_train.py +++ b/tests/position/test_dlc_train.py @@ -25,9 +25,9 @@ def test_existing_params( assert len(params_query) == 1, "Existing params duplicated" -@pytest.mark.usefixtures("skipif_nodlc") -def test_get_params(nodlc, verbose_context, dlc_training_params): - if nodlc: # Decorator wasn't working here, so duplicate skipif +@pytest.mark.usefixtures("skipif_no_dlc") +def test_get_params(no_dlc, verbose_context, dlc_training_params): + if no_dlc: # Decorator wasn't working here, so duplicate skipif pytest.skip(reason="Skipping DLC-dependent tests.") params_tbl, _ = dlc_training_params diff --git a/tests/position/test_trodes.py b/tests/position/test_trodes.py index 92fdfeeb1..6d65f375c 100644 --- a/tests/position/test_trodes.py +++ b/tests/position/test_trodes.py @@ -61,7 +61,8 @@ def test_fetch_df(trodes_pos_v1, trodes_params): assert hash_df == hash_exp, "Dataframe differs from expected" -def test_trodes_video(sgp): +def test_trodes_video(sgp, trodes_pos_v1): + _ = trodes_pos_v1 # ensure table is populated vid_tbl = sgp.v1.TrodesPosVideo() _ = vid_tbl.populate() assert len(vid_tbl) == 2, "Failed to populate TrodesPosVideo" diff --git a/tests/utils/test_db_settings.py b/tests/utils/test_db_settings.py index 3b72ec885..b2435e1f1 100644 --- a/tests/utils/test_db_settings.py +++ b/tests/utils/test_db_settings.py @@ -12,10 +12,10 @@ def db_settings(user_name): return DatabaseSettings( user_name=user_name, - host_name=docker_server.creds["database.host"], + host_name=docker_server.credentials["database.host"], target_database=id, - exec_user=docker_server.creds["database.user"], - exec_pass=docker_server.creds["database.password"], + exec_user=docker_server.credentials["database.user"], + exec_pass=docker_server.credentials["database.password"], test_mode=no_docker, ) diff --git a/tests/utils/test_graph.py b/tests/utils/test_graph.py index 7d5257a36..18899e147 100644 --- a/tests/utils/test_graph.py +++ b/tests/utils/test_graph.py @@ -72,9 +72,11 @@ def test_add_leaf_restr_ft(restr_graph_new_leaf): @pytest.fixture(scope="session") -def restr_graph_root(restr_graph, common, lfp_band, lin_v1): +def restr_graph_root(restr_graph, common, lfp_band, lin_v1, frequent_imports): from spyglass.utils.dj_graph import RestrGraph + _ = lfp_band, lin_v1, frequent_imports # tables populated + yield RestrGraph( seed_table=common.Session(), table_name=common.Session.full_table_name, diff --git a/tests/utils/test_mixin.py b/tests/utils/test_mixin.py index 5b6beb4d0..93d13407a 100644 --- a/tests/utils/test_mixin.py +++ b/tests/utils/test_mixin.py @@ -21,7 +21,7 @@ class Mixin(SpyglassMixin, dj.Manual): reason="Error only on verbose or new declare.", ) def test_bad_prefix(caplog, dj_conn, Mixin): - schema_bad = dj.Schema("badprefix", {}, connection=dj_conn) + schema_bad = dj.Schema("bad_prefix", {}, connection=dj_conn) 
schema_bad(Mixin) assert "Schema prefix not in SHARED_MODULES" in caplog.text @@ -55,7 +55,7 @@ def test_merge_chain_join( ] end_len = [len(chain) for chain in all_chains] - assert sum(end_len) == 4, "Merge chains not joined correctly." + assert sum(end_len) >= 3, "Merge chains not joined correctly." def test_get_chain(Nwbfile, pos_merge_tables): diff --git a/tests/utils/test_nwb_helper_fn.py b/tests/utils/test_nwb_helper_fn.py index ff8175e44..bf2e8aba1 100644 --- a/tests/utils/test_nwb_helper_fn.py +++ b/tests/utils/test_nwb_helper_fn.py @@ -36,7 +36,7 @@ def custom_nwbfile(common): filtering="filtering", group=elec_group, ) - elecs_region = nwbfile.electrodes.create_region( + electrode_region = nwbfile.electrodes.create_region( name="electrodes", region=[2, 3, 4, 5], description="description", # indices @@ -46,7 +46,7 @@ def custom_nwbfile(common): name="eseries", data=[0, 1, 2], timestamps=[0.0, 1.0, 2.0], - electrodes=elecs_region, + electrodes=electrode_region, ) ) yield nwbfile From b89ea99949dd312de2cd571c60149115c36e385f Mon Sep 17 00:00:00 2001 From: Chris Broz Date: Wed, 26 Jun 2024 17:13:39 -0500 Subject: [PATCH 58/60] Prevent delete orphans (#1002) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * WIP: No Orphans downstream * WIP: Bidirectional RestrGraph, remove TableChains * WIP: bridge up to interval list * Add tests for new delete * Update changelog * Fix typo * WIP: topological sort of deletes * ✅ : Topological sort * Revise downloads * Update src/spyglass/utils/dj_helper_fn.py Co-authored-by: Samuel Bray * Ignore unimported non-spyglass in cascade * Load part-master cache before graph * Add more automatic imports * Pin twine req for build --------- Co-authored-by: Samuel Bray --- .github/workflows/test-package-build.yml | 4 +- .gitignore | 2 + CHANGELOG.md | 2 + docs/src/misc/mixin.md | 37 +- notebooks/01_Insert_Data.ipynb | 2 +- notebooks/03_Merge_Tables.ipynb | 10 +- notebooks/py_scripts/01_Insert_Data.py | 2 +- notebooks/py_scripts/03_Merge_Tables.py | 12 +- pyproject.toml | 4 +- src/spyglass/common/common_interval.py | 7 + src/spyglass/common/common_usage.py | 21 +- src/spyglass/decoding/v0/core.py | 1 + src/spyglass/utils/dj_graph.py | 493 ++++++++++++----------- src/spyglass/utils/dj_helper_fn.py | 14 +- src/spyglass/utils/dj_merge_tables.py | 5 +- src/spyglass/utils/dj_mixin.py | 363 +++++++++-------- tests/common/test_usage.py | 89 ++++ tests/utils/conftest.py | 17 +- tests/utils/test_chains.py | 31 +- tests/utils/test_graph.py | 134 +++++- tests/utils/test_merge.py | 19 + tests/utils/test_mixin.py | 99 +++-- 22 files changed, 840 insertions(+), 528 deletions(-) create mode 100644 tests/common/test_usage.py diff --git a/.github/workflows/test-package-build.yml b/.github/workflows/test-package-build.yml index 41aace719..c93b77398 100644 --- a/.github/workflows/test-package-build.yml +++ b/.github/workflows/test-package-build.yml @@ -27,7 +27,9 @@ jobs: - uses: actions/setup-python@v5 with: python-version: 3.9 - - run: pip install --upgrade build twine + - run: | + pip install --upgrade build twine + pip install importlib_metadata==7.2.1 # twine #977 - name: Build sdist and wheel run: python -m build - run: twine check dist/* diff --git a/.gitignore b/.gitignore index 1f18f4178..0cbd43c74 100644 --- a/.gitignore +++ b/.gitignore @@ -60,6 +60,7 @@ coverage.xml .hypothesis/ .pytest_cache/ tests/_data/* +wget-log* # Translations *.mo @@ -128,6 +129,7 @@ dmypy.json .pyre/ # Test Data Files +tests/_data/* *.dat *.mda *.rec diff 
--git a/CHANGELOG.md b/CHANGELOG.md index 69d2d0a87..0092789c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,8 @@ PositionIntervalMap.alter() - Add pytests for position pipeline, various `test_mode` exceptions #966 - Migrate `pip` dependencies from `environment.yml`s to `pyproject.toml` #966 - Add documentation for common error messages #997 +- Expand `delete_downstream_merge` -> `delete_downstream_parts`. #1002 +- `cautious_delete` now checks `IntervalList` and externals tables. #1002 - Allow mixin tables with parallelization in `make` to run populate with `processes > 1` #1001 diff --git a/docs/src/misc/mixin.md b/docs/src/misc/mixin.md index ab49b0c49..23135d3c4 100644 --- a/docs/src/misc/mixin.md +++ b/docs/src/misc/mixin.md @@ -136,29 +136,38 @@ masters, or null entry masters without matching data. For [Merge tables](./merge_tables.md), this is a significant problem. If a user wants to delete all entries associated with a given session, she must find all -Merge entries and delete them in the correct order. The mixin provides a -function, `delete_downstream_merge`, to handle this, which is run by default -when calling `delete`. +part table entries, including Merge tables, and delete them in the correct +order. The mixin provides a function, `delete_downstream_parts`, to handle this, +which is run by default when calling `delete`. -`delete_downstream_merge`, also aliased as `ddm`, identifies all Merge tables -downstream of where it is called. If `dry_run=True`, it will return a list of -entries that would be deleted, otherwise it will delete them. +`delete_downstream_parts`, also aliased as `ddp`, identifies all part tables +with foreign key references downstream of where it is called. If `dry_run=True`, +it will return a list of entries that would be deleted, otherwise it will delete +them. -Importantly, `delete_downstream_merge` cannot properly interact with tables that +Importantly, `delete_downstream_parts` cannot properly interact with tables that have not been imported into the current namespace. If you are having trouble with part deletion errors, import the offending table and rerun the function with `reload_cache=True`. ```python +import datajoint as dj from spyglass.common import Nwbfile restricted_nwbfile = Nwbfile() & "nwb_file_name LIKE 'Name%'" -restricted_nwbfile.delete_downstream_merge(dry_run=False) -# DataJointError("Attempt to delete part table MyMerge.Part before ... + +vanilla_dj_table = dj.FreeTable(dj.conn(), Nwbfile.full_table_name) +vanilla_dj_table.delete() +# DataJointError("Attempt to delete part table MyMerge.Part before ... ") + +restricted_nwbfile.delete() +# [WARNING] Spyglass: No part deletes found w/ Nwbfile ... +# OR +# ValueError("Please import MyMerge and try again.") from spyglass.example import MyMerge -restricted_nwbfile.delete_downstream_merge(reload_cache=True, dry_run=False) +restricted_nwbfile.delete_downstream_parts(reload_cache=True, dry_run=False) ``` Because each table keeps a cache of downstream merge tables, it is important to @@ -169,13 +178,13 @@ Speed gains can also be achieved by avoiding re-instancing the table each time. 
# Slow from spyglass.common import Nwbfile -(Nwbfile() & "nwb_file_name LIKE 'Name%'").ddm(dry_run=False) -(Nwbfile() & "nwb_file_name LIKE 'Other%'").ddm(dry_run=False) +(Nwbfile() & "nwb_file_name LIKE 'Name%'").ddp(dry_run=False) +(Nwbfile() & "nwb_file_name LIKE 'Other%'").ddp(dry_run=False) # Faster from spyglass.common import Nwbfile nwbfile = Nwbfile() -(nwbfile & "nwb_file_name LIKE 'Name%'").ddm(dry_run=False) -(nwbfile & "nwb_file_name LIKE 'Other%'").ddm(dry_run=False) +(nwbfile & "nwb_file_name LIKE 'Name%'").ddp(dry_run=False) +(nwbfile & "nwb_file_name LIKE 'Other%'").ddp(dry_run=False) ``` diff --git a/notebooks/01_Insert_Data.ipynb b/notebooks/01_Insert_Data.ipynb index 23c208cdf..a92e14ca0 100644 --- a/notebooks/01_Insert_Data.ipynb +++ b/notebooks/01_Insert_Data.ipynb @@ -2134,7 +2134,7 @@ "```python\n", "nwbfile = sgc.Nwbfile()\n", "\n", - "(nwbfile & {\"nwb_file_name\": nwb_copy_file_name}).delete_downstream_merge(\n", + "(nwbfile & {\"nwb_file_name\": nwb_copy_file_name}).delete_downstream_parts(\n", " dry_run=False, # True will show Merge Table entries that would be deleted\n", ")\n", "```\n", diff --git a/notebooks/03_Merge_Tables.ipynb b/notebooks/03_Merge_Tables.ipynb index 6adbbd5bf..0f9b2c3c2 100644 --- a/notebooks/03_Merge_Tables.ipynb +++ b/notebooks/03_Merge_Tables.ipynb @@ -90,7 +90,7 @@ "import spyglass.common as sgc\n", "import spyglass.lfp as lfp\n", "from spyglass.utils.nwb_helper_fn import get_nwb_copy_filename\n", - "from spyglass.utils.dj_merge_tables import delete_downstream_merge, Merge\n", + "from spyglass.utils.dj_merge_tables import delete_downstream_parts, Merge\n", "from spyglass.common.common_ephys import LFP as CommonLFP # Upstream 1\n", "from spyglass.lfp.lfp_merge import LFPOutput # Merge Table\n", "from spyglass.lfp.v1.lfp import LFPV1 # Upstream 2" @@ -955,8 +955,8 @@ "2. use `merge_delete_parent` to delete from the parent sources, getting rid of\n", " the entries in the source table they came from.\n", "\n", - "3. use `delete_downstream_merge` to find Merge Tables downstream of any other\n", - " table and get rid full entries, avoiding orphaned master table entries.\n", + "3. use `delete_downstream_parts` to find downstream part tables, like Merge \n", + " Tables, and get rid full entries, avoiding orphaned master table entries.\n", "\n", "The two latter cases can be destructive, so we include an extra layer of\n", "protection with `dry_run`. When true (by default), these functions return\n", @@ -1016,7 +1016,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "`delete_downstream_merge` is available from any other table in the pipeline,\n", + "`delete_downstream_parts` is available from any other table in the pipeline,\n", "but it does take some time to find the links downstream. 
If you're using this,\n", "you can save time by reassigning your table to a variable, which will preserve\n", "a copy of the previous search.\n", @@ -1056,7 +1056,7 @@ "source": [ "nwbfile = sgc.Nwbfile()\n", "\n", - "(nwbfile & nwb_file_dict).delete_downstream_merge(\n", + "(nwbfile & nwb_file_dict).delete_downstream_parts(\n", " dry_run=True,\n", " reload_cache=False, # if still encountering errors, try setting this to True\n", ")" diff --git a/notebooks/py_scripts/01_Insert_Data.py b/notebooks/py_scripts/01_Insert_Data.py index 48ddae39b..f569f971f 100644 --- a/notebooks/py_scripts/01_Insert_Data.py +++ b/notebooks/py_scripts/01_Insert_Data.py @@ -378,7 +378,7 @@ # ```python # nwbfile = sgc.Nwbfile() # -# (nwbfile & {"nwb_file_name": nwb_copy_file_name}).delete_downstream_merge( +# (nwbfile & {"nwb_file_name": nwb_copy_file_name}).delete_downstream_parts( # dry_run=False, # True will show Merge Table entries that would be deleted # ) # ``` diff --git a/notebooks/py_scripts/03_Merge_Tables.py b/notebooks/py_scripts/03_Merge_Tables.py index ac3ad4e69..690bc7834 100644 --- a/notebooks/py_scripts/03_Merge_Tables.py +++ b/notebooks/py_scripts/03_Merge_Tables.py @@ -5,7 +5,7 @@ # extension: .py # format_name: light # format_version: '1.5' -# jupytext_version: 1.15.2 +# jupytext_version: 1.16.0 # kernelspec: # display_name: spy # language: python @@ -64,7 +64,7 @@ import spyglass.common as sgc import spyglass.lfp as lfp from spyglass.utils.nwb_helper_fn import get_nwb_copy_filename -from spyglass.utils.dj_merge_tables import delete_downstream_merge, Merge +from spyglass.utils.dj_merge_tables import delete_downstream_parts, Merge from spyglass.common.common_ephys import LFP as CommonLFP # Upstream 1 from spyglass.lfp.lfp_merge import LFPOutput # Merge Table from spyglass.lfp.v1.lfp import LFPV1 # Upstream 2 @@ -192,8 +192,8 @@ # 2. use `merge_delete_parent` to delete from the parent sources, getting rid of # the entries in the source table they came from. # -# 3. use `delete_downstream_merge` to find Merge Tables downstream of any other -# table and get rid full entries, avoiding orphaned master table entries. +# 3. use `delete_downstream_parts` to find downstream part tables, like Merge +# Tables, and get rid full entries, avoiding orphaned master table entries. # # The two latter cases can be destructive, so we include an extra layer of # protection with `dry_run`. When true (by default), these functions return @@ -204,7 +204,7 @@ LFPOutput.merge_delete_parent(restriction=nwb_file_dict, dry_run=True) -# `delete_downstream_merge` is available from any other table in the pipeline, +# `delete_downstream_parts` is available from any other table in the pipeline, # but it does take some time to find the links downstream. If you're using this, # you can save time by reassigning your table to a variable, which will preserve # a copy of the previous search. 
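For orientation, a minimal sketch of the dry-run workflow described in the notebook text above, assuming a configured Spyglass environment with data already inserted; the file name below is hypothetical, and `dry_run=True` (the default) only previews what would be deleted:

```python
import spyglass.common as sgc

# Reuse one instance so the cached downstream-parts search is preserved
nwbfile = sgc.Nwbfile()
restricted = nwbfile & {"nwb_file_name": "example20240101_.nwb"}  # hypothetical file name

preview = restricted.delete_downstream_parts(dry_run=True)  # returns entries that would be deleted
print(preview)

restricted.delete_downstream_parts(dry_run=False)  # delete for real, avoiding orphaned masters
```
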
@@ -216,7 +216,7 @@ # + nwbfile = sgc.Nwbfile() -(nwbfile & nwb_file_dict).delete_downstream_merge( +(nwbfile & nwb_file_dict).delete_downstream_parts( dry_run=True, reload_cache=False, # if still encountering errors, try setting this to True ) diff --git a/pyproject.toml b/pyproject.toml index 2538b00dc..061947e3d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -126,8 +126,8 @@ ignore-words-list = 'nevers' minversion = "7.0" addopts = [ # "-sv", # no capture, verbose output - # "--sw", # stepwise: resume with next test after failure - # "--pdb", # drop into debugger on failure + "--sw", # stepwise: resume with next test after failure + "--pdb", # drop into debugger on failure "-p no:warnings", # "--no-teardown", # don't teardown the database after tests # "--quiet-spy", # don't show logging from spyglass diff --git a/src/spyglass/common/common_interval.py b/src/spyglass/common/common_interval.py index 66e82bda8..6e4d6b042 100644 --- a/src/spyglass/common/common_interval.py +++ b/src/spyglass/common/common_interval.py @@ -7,6 +7,7 @@ import pandas as pd from spyglass.utils import SpyglassMixin, logger +from spyglass.utils.dj_helper_fn import get_child_tables from .common_session import Session # noqa: F401 @@ -152,6 +153,12 @@ def plot_epoch_pos_raw_intervals(self, figsize=(20, 5), return_fig=False): if return_fig: return fig + def nightly_cleanup(self, dry_run=True): + orphans = self - get_child_tables(self) + if dry_run: + return orphans + orphans.super_delete(warn=False) + def intervals_by_length(interval_list, min_length=0.0, max_length=1e10): """Select intervals of certain lengths from an interval list. diff --git a/src/spyglass/common/common_usage.py b/src/spyglass/common/common_usage.py index 6616fedf6..f6f15ce76 100644 --- a/src/spyglass/common/common_usage.py +++ b/src/spyglass/common/common_usage.py @@ -15,7 +15,7 @@ from pynwb import NWBHDF5IO from spyglass.common.common_nwbfile import AnalysisNwbfile, Nwbfile -from spyglass.settings import export_dir +from spyglass.settings import export_dir, test_mode from spyglass.utils import SpyglassMixin, logger from spyglass.utils.dj_graph import RestrGraph from spyglass.utils.dj_helper_fn import ( @@ -122,7 +122,8 @@ def insert1_return_pk(self, key: dict, **kwargs) -> int: export_id = query.fetch1("export_id") export_key = {"export_id": export_id} if query := (Export & export_key): - query.super_delete(warn=False) + safemode = False if test_mode else None # No prompt in tests + query.super_delete(warn=False, safemode=safemode) logger.info(f"{status} {export_key}") return export_id @@ -193,9 +194,11 @@ def _max_export_id(self, paper_id: str, return_all=False) -> int: all_export_ids = query.fetch("export_id") return all_export_ids if return_all else max(all_export_ids) - def paper_export_id(self, paper_id: str) -> dict: + def paper_export_id(self, paper_id: str, return_all=False) -> dict: """Return the maximum export_id for a paper, used to populate Export.""" - return {"export_id": self._max_export_id(paper_id)} + if not return_all: + return {"export_id": self._max_export_id(paper_id)} + return [{"export_id": id} for id in self._max_export_id(paper_id, True)] @schema @@ -234,11 +237,11 @@ def populate_paper(self, paper_id: Union[str, dict]): self.populate(ExportSelection().paper_export_id(paper_id)) def make(self, key): - query = ExportSelection & key - paper_key = query.fetch("paper_id", as_dict=True)[0] + paper_key = (ExportSelection & key).fetch("paper_id", as_dict=True)[0] + query = ExportSelection & paper_key # Null insertion if 
export_id is not the maximum for the paper - all_export_ids = query._max_export_id(paper_key, return_all=True) + all_export_ids = ExportSelection()._max_export_id(paper_key, True) max_export_id = max(all_export_ids) if key.get("export_id") != max_export_id: logger.info( @@ -259,7 +262,7 @@ def make(self, key): (self.Table & id_dict).delete_quick() (self.Table & id_dict).delete_quick() - restr_graph = query.get_restr_graph(paper_key) + restr_graph = ExportSelection().get_restr_graph(paper_key) file_paths = unique_dicts( # Original plus upstream files query.list_file_paths(paper_key) + restr_graph.file_paths ) @@ -275,7 +278,7 @@ def make(self, key): # Writes but does not run mysqldump. Assumes single version per paper. version_key = query.fetch("spyglass_version", as_dict=True)[0] self.write_export( - free_tables=restr_graph.all_ft, **paper_key, **version_key + free_tables=restr_graph.restr_ft, **paper_key, **version_key ) self.insert1({**key, **paper_key}) diff --git a/src/spyglass/decoding/v0/core.py b/src/spyglass/decoding/v0/core.py index 5664c12d9..3df82f318 100644 --- a/src/spyglass/decoding/v0/core.py +++ b/src/spyglass/decoding/v0/core.py @@ -13,6 +13,7 @@ ObservationModel, ) except ImportError as e: + RandomWalk, Uniform, Environment, ObservationModel = None, None, None, None logger.warning(e) from spyglass.common.common_behav import PositionIntervalMap, RawPosition diff --git a/src/spyglass/utils/dj_graph.py b/src/spyglass/utils/dj_graph.py index 5bf3d25d0..3e90d4736 100644 --- a/src/spyglass/utils/dj_graph.py +++ b/src/spyglass/utils/dj_graph.py @@ -4,16 +4,16 @@ """ from abc import ABC, abstractmethod -from collections.abc import KeysView +from copy import deepcopy from enum import Enum from functools import cached_property from itertools import chain as iter_chain -from typing import Any, Dict, List, Set, Tuple, Union +from typing import Any, Dict, Iterable, List, Set, Tuple, Union -import datajoint as dj from datajoint import FreeTable, Table from datajoint.condition import make_condition from datajoint.dependencies import unite_master_parts +from datajoint.user_tables import TableMeta from datajoint.utils import get_master, to_camel_case from networkx import ( NetworkXNoPath, @@ -25,12 +25,12 @@ from tqdm import tqdm from spyglass.utils import logger +from spyglass.utils.database_settings import SHARED_MODULES from spyglass.utils.dj_helper_fn import ( PERIPHERAL_TABLES, fuzzy_get, unique_dicts, ) -from spyglass.utils.dj_merge_tables import is_merge_table class Direction(Enum): @@ -70,10 +70,12 @@ class AbstractGraph(ABC): ------- cascade: Abstract method implemented by child classes cascade1: Cascade a restriction up/down the graph, recursively + ft_from_list: Return non-empty FreeTable objects from list of table names Properties ---------- all_ft: Get all FreeTables for visited nodes with restrictions applied. + restr_ft: Get non-empty FreeTables for visited nodes with restrictions. as_dict: Get visited nodes as a list of dictionaries of {table_name: restriction} """ @@ -91,11 +93,16 @@ def __init__(self, seed_table: Table, verbose: bool = False, **kwargs): self.seed_table = seed_table self.connection = seed_table.connection - # Undirected graph may not be needed, but adding FT to the graph - # prevents `to_undirected` from working. If using undirected, remove - # PERIPHERAL_TABLES from the graph. 
- self.graph = seed_table.connection.dependencies - self.graph.load() + # Deepcopy graph to avoid seed `load()` resetting custom attributes + seed_table.connection.dependencies.load() + graph = seed_table.connection.dependencies + orig_conn = graph._conn # Cannot deepcopy connection + graph._conn = None + self.graph = deepcopy(graph) + graph._conn = orig_conn + + # undirect not needed in all cases but need to do before adding ft nodes + self.undirect_graph = self.graph.to_undirected() self.verbose = verbose self.leaves = set() @@ -111,6 +118,24 @@ def cascade(self): """Cascade restrictions through graph.""" raise NotImplementedError("Child class mut implement `cascade` method") + # --------------------------- Dunder Properties --------------------------- + + def __repr__(self): + l_str = ( + ",\n\t".join(self._camel(self.leaves)) + "\n" + if self.leaves + else "Seed: " + self._camel(self.seed_table) + "\n" + ) + casc_str = "Cascaded" if self.cascaded else "Uncascaded" + return f"{casc_str} {self.__class__.__name__}(\n\t{l_str})" + + def __getitem__(self, index: Union[int, str]): + names = [t.full_table_name for t in self.restr_ft] + return fuzzy_get(index, names, self.restr_ft) + + def __len__(self): + return len(self.restr_ft) + # ---------------------------- Logging Helpers ---------------------------- def _log_truncate(self, log_str: str, max_len: int = 80): @@ -123,34 +148,33 @@ def _log_truncate(self, log_str: str, max_len: int = 80): def _camel(self, table): """Convert table name(s) to camel case.""" - if isinstance(table, KeysView): - table = list(table) - if not isinstance(table, list): - table = [table] - ret = [to_camel_case(t.split(".")[-1].strip("`")) for t in table] - return ret[0] if len(ret) == 1 else ret - - def _print_restr(self): - """Print restrictions for debugging.""" - for table in self.visited: - if restr := self._get_restr(table): - logger.info(f"{table}: {restr}") + table = self._ensure_names(table) + if isinstance(table, str): + return to_camel_case(table.split(".")[-1].strip("`")) + if isinstance(table, Iterable) and not isinstance( + table, (Table, TableMeta) + ): + return [self._camel(t) for t in table] # ------------------------------ Graph Nodes ------------------------------ - def _ensure_name(self, table: Union[str, Table] = None) -> str: + def _ensure_names( + self, table: Union[str, Table] = None + ) -> Union[str, List[str]]: """Ensure table is a string.""" if table is None: return None if isinstance(table, str): return table - if isinstance(table, list): - return [self._ensure_name(t) for t in table] + if isinstance(table, Iterable) and not isinstance( + table, (Table, TableMeta) + ): + return [self._ensure_names(t) for t in table] return getattr(table, "full_table_name", None) def _get_node(self, table: Union[str, Table]): """Get node from graph.""" - table = self._ensure_name(table) + table = self._ensure_names(table) if not (node := self.graph.nodes.get(table)): raise ValueError( f"Table {table} not found in graph." @@ -160,6 +184,7 @@ def _get_node(self, table: Union[str, Table]): def _set_node(self, table, attr: str = "ft", value: Any = None): """Set attribute on node. General helper for various attributes.""" + table = self._ensure_names(table) _ = self._get_node(table) # Ensure node exists self.graph.nodes[table][attr] = value @@ -175,8 +200,8 @@ def _get_edge(self, child: str, parent: str) -> Tuple[bool, Dict[str, str]]: Tuple of boolean indicating direction and edge data. True if child is child of parent. 
""" - child = self._ensure_name(child) - parent = self._ensure_name(parent) + child = self._ensure_names(child) + parent = self._ensure_names(parent) if edge := self.graph.get_edge_data(parent, child): return False, edge @@ -196,7 +221,7 @@ def _get_edge(self, child: str, parent: str) -> Tuple[bool, Dict[str, str]]: def _get_restr(self, table): """Get restriction from graph node.""" - return self._get_node(self._ensure_name(table)).get("restr") + return self._get_node(self._ensure_names(table)).get("restr") def _set_restr(self, table, restriction, replace=False): """Add restriction to graph node. If one exists, merge with new.""" @@ -207,6 +232,7 @@ def _set_restr(self, table, restriction, replace=False): else restriction ) existing = self._get_restr(table) + if not replace and existing: if restriction == existing: return @@ -219,12 +245,13 @@ def _set_restr(self, table, restriction, replace=False): self._set_node(table, "restr", restriction) - def _get_ft(self, table, with_restr=False): + def _get_ft(self, table, with_restr=False, warn=True): """Get FreeTable from graph node. If one doesn't exist, create it.""" - table = self._ensure_name(table) + table = self._ensure_names(table) if with_restr: if not (restr := self._get_restr(table) or False): - self._log_truncate(f"No restriction for {table}") + if warn: + self._log_truncate(f"No restr for {self._camel(table)}") else: restr = True @@ -234,13 +261,14 @@ def _get_ft(self, table, with_restr=False): return ft & restr - def _and_parts(self, table): - """Return table, its master and parts.""" - ret = [table] - if master := get_master(table): - ret.append(master) - if parts := self._get_ft(table).parts(): - ret.extend(parts) + def _is_out(self, table, warn=True): + """Check if table is outside of spyglass.""" + table = self._ensure_names(table) + if self.graph.nodes.get(table): + return False + ret = table.split(".")[0].split("_")[0].strip("`") not in SHARED_MODULES + if warn and ret: # Log warning if outside + logger.warning(f"Skipping unimported: {table}") return ret # ---------------------------- Graph Traversal ----------------------------- @@ -282,15 +310,19 @@ def _bridge_restr( List[Dict[str, str]] List of dicts containing primary key fields for restricted table2. 
""" + if self._is_out(table2) or self._is_out(table1): # 2 more likely + return ["False"] # Stop cascade if outside, see #1002 + if not all([direction, attr_map]): dir_bool, edge = self._get_edge(table1, table2) direction = "up" if dir_bool else "down" attr_map = edge.get("attr_map") + # May return empty table if outside imported and outside spyglass ft1 = self._get_ft(table1) & restr ft2 = self._get_ft(table2) - if len(ft1) == 0: + if len(ft1) == 0 or len(ft2) == 0: return ["False"] if bool(set(attr_map.values()) - set(ft1.heading.names)): @@ -333,11 +365,16 @@ def _get_next_tables(self, table: str, direction: Direction) -> Tuple: G = self.graph dir_dict = {"direction": direction} - bonus = {} + bonus = {} # Add master and parts to next tables direction = Direction(direction) if direction == Direction.UP: next_func = G.parents - bonus.update({part: {} for part in self._get_ft(table).parts()}) + table_ft = self._get_ft(table) + for part in table_ft.parts(): # Assumes parts do not alias master + bonus[part] = { + "attr_map": {k: k for k in table_ft.primary_key}, + **dir_dict, + } elif direction == Direction.DOWN: next_func = G.children if (master_name := get_master(table)) != "": @@ -382,9 +419,12 @@ def cascade1( next_tables, next_func = self._get_next_tables(table, direction) - self._log_truncate( - f"Checking {count:>2}: {self._camel(next_tables.keys())}" - ) + if next_list := next_tables.keys(): + self._log_truncate( + f"Checking {count:>2}: {self._camel(table)}" + + f" -> {self._camel(next_list)}" + ) + for next_table, data in next_tables.items(): if next_table.isnumeric(): # Skip alias nodes next_table, data = next_func(next_table).popitem() @@ -422,21 +462,81 @@ def cascade1( # ---------------------------- Graph Properties ---------------------------- + def _topo_sort( + self, nodes: List[str], subgraph: bool = True, reverse: bool = False + ) -> List[str]: + """Return topologically sorted list of nodes. + + Parameters + ---------- + nodes : List[str] + List of table names + subgraph : bool, optional + Whether to use subgraph. Default True + reverse : bool, optional + Whether to reverse the order. Default False. If true, bottom-up. + If None, return nodes as is. + """ + if reverse is None: + return nodes + nodes = [ + node + for node in self._ensure_names(nodes) + if not self._is_out(node, warn=False) + ] + graph = self.graph.subgraph(nodes) if subgraph else self.graph + ordered = unite_master_parts(list(topological_sort(graph))) + if reverse: + ordered.reverse() + return [n for n in ordered if n in nodes] + @property def all_ft(self): """Get restricted FreeTables from all visited nodes. Topological sort logic adopted from datajoint.diagram. """ - self.cascade() + self.cascade(warn=False) nodes = [n for n in self.visited if not n.isnumeric()] - sorted_nodes = unite_master_parts( - list(topological_sort(self.graph.subgraph(nodes))) - ) - all_ft = [ - self._get_ft(table, with_restr=True) for table in sorted_nodes + return [ + self._get_ft(table, with_restr=True, warn=False) + for table in self._topo_sort(nodes, subgraph=True, reverse=False) ] - return [ft for ft in all_ft if len(ft) > 0] + + @property + def restr_ft(self): + """Get non-empty restricted FreeTables from all visited nodes.""" + return [ft for ft in self.all_ft if len(ft) > 0] + + def ft_from_list( + self, + tables: List[str], + with_restr: bool = True, + sort_reverse: bool = None, + return_empty: bool = False, + ) -> List[FreeTable]: + """Return non-empty FreeTable objects from list of table names. 
+ + Parameters + ---------- + tables : List[str] + List of table names + with_restr : bool, optional + Restrict FreeTable to restriction. Default True. + sort_reverse : bool, optional + Sort reverse topologically. Default True. If None, no sort. + """ + + self.cascade(warn=False) + + fts = [ + self._get_ft(table, with_restr=with_restr, warn=False) + for table in self._topo_sort( + tables, subgraph=False, reverse=sort_reverse + ) + ] + + return fts if return_empty else [ft for ft in fts if len(ft) > 0] @property def as_dict(self) -> List[Dict[str, str]]: @@ -453,9 +553,8 @@ class RestrGraph(AbstractGraph): def __init__( self, seed_table: Table, - table_name: str = None, - restriction: str = None, leaves: List[Dict[str, str]] = None, + destinations: List[str] = None, direction: Direction = "up", cascade: bool = False, verbose: bool = False, @@ -473,13 +572,12 @@ def __init__( ---------- seed_table : Table Table to use to establish connection and graph - table_name : str, optional - Table name of single leaf, default None - restriction : str, optional - Restriction to apply to leaf. default None leaves : Dict[str, str], optional List of dictionaries with keys table_name and restriction. One entry per leaf node. Default None. + destinations : List[str], optional + List of endpoints of interest in the graph. Default None. Used to + ignore nodes not in the path(s) to the destination(s). direction : Direction, optional Direction to cascade. Default 'up' cascade : bool, optional @@ -490,27 +588,18 @@ def __init__( """ super().__init__(seed_table, verbose=verbose) - self.add_leaf( - table_name=table_name, restriction=restriction, direction=direction - ) self.add_leaves(leaves) - if cascade: - self.cascade(direction=direction) + dir_list = ["up", "down"] if direction == "both" else [direction] - # --------------------------- Dunder Properties --------------------------- - - def __repr__(self): - l_str = ",\n\t".join(self.leaves) + "\n" if self.leaves else "" - processed = "Cascaded" if self.cascaded else "Uncascaded" - return f"{processed} {self.__class__.__name__}(\n\t{l_str})" - - def __getitem__(self, index: Union[int, str]): - all_ft_names = [t.full_table_name for t in self.all_ft] - return fuzzy_get(index, all_ft_names, self.all_ft) - - def __len__(self): - return len(self.all_ft) + if cascade: + for dir in dir_list: + self._log_truncate(f"Start {dir:<4} : {self.leaves}") + self.cascade(direction=dir) + self.cascaded = False + self.visited -= self.leaves + self.cascaded = True + self.visited |= self.leaves # ---------------------------- Public Properties -------------------------- @@ -558,7 +647,13 @@ def add_leaf( self.cascaded = True def _process_leaves(self, leaves=None, default_restriction=True): - """Process leaves to ensure they are unique and have required keys.""" + """Process leaves to ensure they are unique and have required keys. + + Accepts ... 
+ - [str]: table names, use default_restriction + - [{'table_name': str, 'restriction': str}]: used for export + - [{table_name: restriction}]: userd for distance restriction + """ if not leaves: return [] if not isinstance(leaves, list): @@ -568,10 +663,22 @@ def _process_leaves(self, leaves=None, default_restriction=True): {"table_name": leaf, "restriction": default_restriction} for leaf in leaves ] - if all(isinstance(leaf, dict) for leaf in leaves) and not all( - leaf.get("table_name") for leaf in leaves - ): - raise ValueError(f"All leaves must have table_name: {leaves}") + hashable = True + if all(isinstance(leaf, dict) for leaf in leaves): + new_leaves = [] + for leaf in leaves: + if "table_name" in leaf and "restriction" in leaf: + new_leaves.append(leaf) + continue + for table, restr in leaf.items(): + if not isinstance(restr, (str, dict)): + hashable = False # likely a dj.AndList + new_leaves.append( + {"table_name": table, "restriction": restr} + ) + if not hashable: + return new_leaves + leaves = new_leaves return unique_dicts(leaves) @@ -609,7 +716,7 @@ def add_leaves( # ------------------------------ Graph Traversal -------------------------- - def cascade(self, show_progress=None, direction="up") -> None: + def cascade(self, show_progress=None, direction="up", warn=True) -> None: """Cascade all restrictions up the graph. Parameters @@ -618,6 +725,8 @@ def cascade(self, show_progress=None, direction="up") -> None: Show tqdm progress bar. Default to verbose setting. """ if self.cascaded: + if warn: + self._log_truncate("Already cascaded") return to_visit = self.leaves - self.visited @@ -629,27 +738,16 @@ def cascade(self, show_progress=None, direction="up") -> None: disable=not (show_progress or self.verbose), ): restr = self._get_restr(table) - self._log_truncate(f"Start {table}: {restr}") + self._log_truncate( + f"Start {direction:<4}: {self._camel(table)}, {restr}" + ) self.cascade1(table, restr, direction=direction) - self.cascade_files() - self.cascaded = True + self.cascaded = True # Mark here so next step can use `restr_ft` + self.cascade_files() # Otherwise attempts to re-cascade, recursively # ----------------------------- File Handling ----------------------------- - def _get_files(self, table): - """Get analysis files from graph node.""" - return self._get_node(table).get("files", []) - - def cascade_files(self): - """Set node attribute for analysis files.""" - for table in self.visited: - ft = self._get_ft(table, with_restr=True) - if not set(self.analysis_pk).issubset(ft.heading.names): - continue - files = list(ft.fetch(*self.analysis_pk)) - self._set_node(table, "files", files) - @property def analysis_file_tbl(self) -> Table: """Return the analysis file table. Avoids circular import.""" @@ -657,10 +755,14 @@ def analysis_file_tbl(self) -> Table: return AnalysisNwbfile() - @property - def analysis_pk(self) -> List[str]: - """Return primary key fields from analysis file table.""" - return self.analysis_file_tbl.primary_key + def cascade_files(self): + """Set node attribute for analysis files.""" + analysis_pk = self.analysis_file_tbl.primary_key + for ft in self.restr_ft: + if not set(analysis_pk).issubset(ft.heading.names): + continue + files = list(ft.fetch(*analysis_pk)) + self._set_node(ft, "files", files) @property def file_dict(self) -> Dict[str, List[str]]: @@ -668,8 +770,8 @@ def file_dict(self) -> Dict[str, List[str]]: Included for debugging, to associate files with tables. 
""" - self.cascade() - return {t: self._get_node(t).get("files", []) for t in self.visited} + self.cascade(warn=False) + return {t: self._get_node(t).get("files", []) for t in self.restr_ft} @property def file_paths(self) -> List[str]: @@ -688,96 +790,16 @@ def file_paths(self) -> List[str]: ] -class TableChains: - """Class for representing chains from parent to Merge table via parts. - - Functions as a plural version of TableChain, allowing a single `cascade` - call across all chains from parent -> Merge table. - - Attributes - ---------- - parent : Table - Parent or origin of chains. - child : Table - Merge table or destination of chains. - connection : datajoint.Connection, optional - Connection to database used to create FreeTable objects. Defaults to - parent.connection. - part_names : List[str] - List of full table names of child parts. - chains : List[TableChain] - List of TableChain objects for each part in child. - has_link : bool - Cached attribute to store whether parent is linked to child via any of - child parts. False if (a) child is not in parent.descendants or (b) - nx.NetworkXNoPath is raised by nx.shortest_path for all chains. - - Methods - ------- - __init__(parent, child, connection=None) - Initialize TableChains with parent and child tables. - __repr__() - Return full representation of chains. - Multiline parent -> child for each chain. - __len__() - Return number of chains with links. - __getitem__(index: Union[int, str]) - Return TableChain object at index, or use substring of table name. - cascade(restriction: str = None) - Return list of cascade for each chain in self.chains. - """ - - def __init__(self, parent, child, direction=Direction.DOWN, verbose=False): - self.parent = parent - self.child = child - self.connection = parent.connection - self.part_names = child.parts() - self.chains = [ - TableChain(parent, part, direction=direction, verbose=verbose) - for part in self.part_names - ] - self.has_link = any([chain.has_link for chain in self.chains]) - - # --------------------------- Dunder Properties --------------------------- - - def __repr__(self): - l_str = ",\n\t".join([str(c) for c in self.chains]) + "\n" - return f"{self.__class__.__name__}(\n\t{l_str})" - - def __len__(self): - return len([c for c in self.chains if c.has_link]) - - def __getitem__(self, index: Union[int, str]): - """Return FreeTable object at index.""" - return fuzzy_get(index, self.part_names, self.chains) - - # ---------------------------- Public Properties -------------------------- - - @property - def max_len(self): - """Return length of longest chain.""" - return max([len(chain) for chain in self.chains]) - - # ------------------------------ Graph Traversal -------------------------- - - def cascade( - self, restriction: str = None, direction: Direction = Direction.DOWN - ): - """Return list of cascades for each chain in self.chains.""" - restriction = restriction or self.parent.restriction or True - cascades = [] - for chain in self.chains: - if joined := chain.cascade(restriction, direction): - cascades.append(joined) - return cascades - - class TableChain(RestrGraph): """Class for representing a chain of tables. A chain is a sequence of tables from parent to child identified by - networkx.shortest_path. Parent -> Merge should use TableChains instead to - handle multiple paths to the respective parts of the Merge table. + networkx.shortest_path from parent to child. To avoid issues with merge + tables, use the Merge table as the child, not the part table. 
+ + Either the parent or child can be omitted if a search_restr is provided. + The missing table will be found by searching for where the restriction + can be applied. Attributes ---------- @@ -789,9 +811,6 @@ class TableChain(RestrGraph): Cached attribute to store whether parent is linked to child. path : List[str] Names of tables along the path from parent to child. - all_ft : List[dj.FreeTable] - List of FreeTable objects for each table in chain with restriction - applied. Methods ------- @@ -804,6 +823,8 @@ class TableChain(RestrGraph): Given a restriction at the beginning, return a restricted FreeTable object at the end of the chain. If direction is 'up', start at the child and move up to the parent. If direction is 'down', start at the parent. + cascade_search() + Search from the leaf node to find where a restriction can be applied. """ def __init__( @@ -814,27 +835,21 @@ def __init__( search_restr: str = None, cascade: bool = False, verbose: bool = False, - allow_merge: bool = False, banned_tables: List[str] = None, **kwargs, ): - if not allow_merge and child is not None and is_merge_table(child): - raise TypeError("Child is a merge table. Use TableChains instead.") - - self.parent = self._ensure_name(parent) - self.child = self._ensure_name(child) + self.parent = self._ensure_names(parent) + self.child = self._ensure_names(child) if not self.parent and not self.child: raise ValueError("Parent or child table required.") - if not search_restr and not (self.parent and self.child): - raise ValueError("Search restriction required to find path.") seed_table = parent if isinstance(parent, Table) else child super().__init__(seed_table=seed_table, verbose=verbose) - self.no_visit.update(PERIPHERAL_TABLES) - self.no_visit.update(self._ensure_name(banned_tables) or []) - self.no_visit.difference_update([self.parent, self.child]) + self._ignore_peripheral(except_tables=[self.parent, self.child]) + self.no_visit.update(self._ensure_names(banned_tables) or []) + self.no_visit.difference_update(set([self.parent, self.child])) self.searched_tables = set() self.found_restr = False self.link_type = None @@ -843,6 +858,8 @@ def __init__( self.search_restr = search_restr self.direction = Direction(direction) + if self.parent and self.child and not self.direction: + self.direction = Direction.DOWN self.leaf = None if search_restr and not parent: @@ -856,10 +873,20 @@ def __init__( self.add_leaf(self.leaf, True, cascade=False, direction=direction) if cascade and search_restr: - self.cascade_search() - self.cascade(restriction=search_restr) + self.cascade_search() # only cascade if found or not looking + if (search_restr and self.found_restr) or not search_restr: + self.cascade(restriction=search_restr) self.cascaded = True + # ------------------------------ Ignore Nodes ------------------------------ + + def _ignore_peripheral(self, except_tables: List[str] = None): + """Ignore peripheral tables in graph traversal.""" + except_tables = self._ensure_names(except_tables) + ignore_tables = set(PERIPHERAL_TABLES) - set(except_tables or []) + self.no_visit.update(ignore_tables) + self.undirect_graph.remove_nodes_from(ignore_tables) + # --------------------------- Dunder Properties --------------------------- def __str__(self): @@ -884,9 +911,6 @@ def __len__(self): return 0 return len(self.path) - def __getitem__(self, index: Union[int, str]): - return fuzzy_get(index, self.path, self.all_ft) - # ---------------------------- Public Properties -------------------------- @property @@ -900,26 +924,18 @@ def 
has_link(self) -> bool: _ = self.path return self.link_type is not None - @cached_property - def all_ft(self) -> List[dj.FreeTable]: - """Return list of FreeTable objects for each table in chain. - - Unused. Preserved for future debugging. - """ - if not self.has_link: - return None - return [ - self._get_ft(table, with_restr=False) - for table in self.path - if not table.isnumeric() - ] - @property def path_str(self) -> str: if not self.path: return "No link" return self._link_symbol.join([self._camel(t) for t in self.path]) + @property + def path_ft(self) -> List[FreeTable]: + """Return FreeTables along the path.""" + path_with_ends = set([self.parent, self.child]) | set(self.path) + return self.ft_from_list(path_with_ends, with_restr=True) + # ------------------------------ Graph Nodes ------------------------------ def _set_find_restr(self, table_name, restriction): @@ -962,6 +978,7 @@ def cascade_search(self) -> None: replace=True, ) if not self.found_restr: + self.link_type = None searched = ( "parents" if self.direction == Direction.UP else "children" ) @@ -975,7 +992,14 @@ def _set_found_vars(self, table): """Set found_restr and searched_tables.""" self._set_restr(table, self.search_restr, replace=True) self.found_restr = True - self.searched_tables.update(set(self._and_parts(table))) + + and_parts = set([table]) + if master := get_master(table): + and_parts.add(master) + if parts := self._get_ft(table).parts(): + and_parts.update(parts) + + self.searched_tables.update(and_parts) if self.direction == Direction.UP: self.parent = table @@ -1055,12 +1079,7 @@ def find_path(self, directed=True) -> List[str]: List of names in the path. """ source, target = self.parent, self.child - search_graph = self.graph - - if not directed: - self.connection.dependencies.load() - self.undirect_graph = self.connection.dependencies.to_undirected() - search_graph = self.undirect_graph + search_graph = self.graph if directed else self.undirect_graph search_graph.remove_nodes_from(self.no_visit) @@ -1077,7 +1096,6 @@ def find_path(self, directed=True) -> List[str]: ignore_nodes = self.graph.nodes - set(path) self.no_visit.update(ignore_nodes) - self._log_truncate(f"Ignore : {ignore_nodes}") return path @cached_property @@ -1095,7 +1113,9 @@ def path(self) -> list: return path - def cascade(self, restriction: str = None, direction: Direction = None): + def cascade( + self, restriction: str = None, direction: Direction = None, **kwargs + ): if not self.has_link: return @@ -1111,11 +1131,20 @@ def cascade(self, restriction: str = None, direction: Direction = None): self.cascade1( table=start, - restriction=restriction or self._get_restr(start), + restriction=restriction or self._get_restr(start) or True, direction=direction, replace=True, ) + # Cascade will stop if any restriction is empty, so set rest to None + # This would cause issues if we want a table partway through the chain + # but that's not a typical use case, were the start and end are desired + non_numeric = [t for t in self.path if not t.isnumeric()] + if any(self._get_restr(t) is None for t in non_numeric): + for table in non_numeric: + if table is not start: + self._set_restr(table, False, replace=True) + return self._get_ft(end, with_restr=True) def restrict_by(self, *args, **kwargs) -> None: diff --git a/src/spyglass/utils/dj_helper_fn.py b/src/spyglass/utils/dj_helper_fn.py index f42c9858a..da7d30a3b 100644 --- a/src/spyglass/utils/dj_helper_fn.py +++ b/src/spyglass/utils/dj_helper_fn.py @@ -265,7 +265,6 @@ def fetch_nwb(query_expression, 
nwb_master, *attrs, **kwargs): rec_dict["nwb2load_filepath"] = file_path continue - # Pulled from future cbroz1/ndo # Full dict caused issues with dlc tables using dicts in secondary keys rec_only_pk = {k: rec_dict[k] for k in query_table.heading.primary_key} rec_dict["nwb2load_filepath"] = (query_table & rec_only_pk).fetch1( @@ -352,7 +351,7 @@ def update_analysis_for_dandi_standard( species_value = file["/general/subject/species"][()].decode("utf-8") if species_value == "Rat": new_species_value = "Rattus norvegicus" - print( + logger.info( f"Adjusting subject species from '{species_value}' to " + f"'{new_species_value}'." ) @@ -363,10 +362,10 @@ def update_analysis_for_dandi_standard( ): raise ValueError( "Dandi upload requires species either be in Latin binomial form" - + " (e.g., 'Mus musculus' and 'Homo sapiens')" - + "or be a NCBI taxonomy link (e.g., " - + "'http://purl.obolibrary.org/obo/NCBITaxon_280675')." - + f"\n Please update species value of: {species_value}" + + " (e.g., 'Mus musculus' and 'Homo sapiens') or be a NCBI " + + "taxonomy link (e.g., " + + "'http://purl.obolibrary.org/obo/NCBITaxon_280675').\n " + + f"Please update species value of: {species_value}" ) # add subject age dataset "P4M/P8M" @@ -385,7 +384,8 @@ def update_analysis_for_dandi_standard( if experimenter_value != new_experimenter_value: new_experimenter_value = new_experimenter_value.astype(STR_DTYPE) logger.info( - f"Adjusting experimenter from {experimenter_value} to {new_experimenter_value}." + f"Adjusting experimenter from {experimenter_value} to " + + f"{new_experimenter_value}." ) file["/general/experimenter"][:] = new_experimenter_value diff --git a/src/spyglass/utils/dj_merge_tables.py b/src/spyglass/utils/dj_merge_tables.py index d1176de30..474ff91c8 100644 --- a/src/spyglass/utils/dj_merge_tables.py +++ b/src/spyglass/utils/dj_merge_tables.py @@ -820,9 +820,8 @@ def delete_downstream_merge( ) -> list: """Given a table/restriction, id or delete relevant downstream merge entries - Passthrough to SpyglassMixin.delete_downstream_merge + Passthrough to SpyglassMixin.delete_downstream_parts """ - from spyglass.common.common_usage import ActivityLog from spyglass.utils.dj_mixin import SpyglassMixin @@ -834,4 +833,4 @@ def delete_downstream_merge( raise ValueError("Input must be a Spyglass Table.") table = table if isinstance(table, dj.Table) else table() - return table.delete_downstream_merge(**kwargs) + return table.delete_downstream_parts(**kwargs) diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index 4cdbbbaa0..1db44078a 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -1,6 +1,5 @@ from atexit import register as exit_register from atexit import unregister as exit_unregister -from collections import OrderedDict from contextlib import nullcontext from functools import cached_property from inspect import stack as inspect_stack @@ -25,7 +24,6 @@ get_nwb_table, populate_pass_function, ) -from spyglass.utils.dj_merge_tables import RESERVED_PRIMARY_KEY as MERGE_PK from spyglass.utils.dj_merge_tables import Merge, is_merge_table from spyglass.utils.logging import logger @@ -56,8 +54,8 @@ class SpyglassMixin: `restriction` can be set to a string to restrict the delete. `dry_run` can be set to False to commit the delete. `reload_cache` can be set to True to reload the merge cache. - ddm(*args, **kwargs) - Alias for delete_downstream_merge. 
+ ddp(*args, **kwargs) + Alias for delete_downstream_parts cautious_delete(force_permission=False, *args, **kwargs) Check user permissions before deleting table rows. Permission is granted to users listed as admin in LabMember table or to users on a team with @@ -117,14 +115,14 @@ def _auto_increment(self, key, pk, *args, **kwargs): def file_like(self, name=None, **kwargs): """Convenience method for wildcard search on file name fields.""" if not name: - return self & True + return self attr = None for field in self.heading.names: if "file" in field: attr = field break if not attr: - logger.error(f"No file-like field found in {self.full_table_name}") + logger.error(f"No file_like field found in {self.full_table_name}") return return self & f"{attr} LIKE '%{name}%'" @@ -257,135 +255,145 @@ def fetch_pynapple(self, *attrs, **kwargs): for file_name in nwb_files ] - # ------------------------ delete_downstream_merge ------------------------ + # ------------------------ delete_downstream_parts ------------------------ - def _import_merge_tables(self): - """Import all merge tables downstream of self.""" + def _import_part_masters(self): + """Import tables that may constrain a RestrGraph. See #1002""" + from spyglass.common.common_ripple import ( + RippleLFPSelection, + ) # noqa F401 from spyglass.decoding.decoding_merge import DecodingOutput # noqa F401 + from spyglass.decoding.v0.clusterless import ( # noqa F401 + UnitMarksIndicatorSelection, + ) + from spyglass.decoding.v0.sorted_spikes import ( # noqa F401 + SortedSpikesIndicatorSelection, + ) + from spyglass.decoding.v1.core import PositionGroup # noqa F401 + from spyglass.lfp.analysis.v1 import LFPBandSelection # noqa F401 from spyglass.lfp.lfp_merge import LFPOutput # noqa F401 - from spyglass.linearization.merge import ( + from spyglass.linearization.merge import ( # noqa F401 LinearizedPositionOutput, - ) # noqa F401 + LinearizedPositionV1, + ) + from spyglass.mua.v1.mua import MuaEventsV1 # noqa F401 from spyglass.position.position_merge import PositionOutput # noqa F401 + from spyglass.ripple.v1.ripple import RippleTimesV1 # noqa F401 + from spyglass.spikesorting.analysis.v1.group import ( # noqa F401 + SortedSpikesGroup, + ) from spyglass.spikesorting.spikesorting_merge import ( # noqa F401 SpikeSortingOutput, ) + from spyglass.spikesorting.v0.figurl_views import ( # noqa F401 + SpikeSortingRecordingView, + ) _ = ( DecodingOutput(), + LFPBandSelection(), LFPOutput(), LinearizedPositionOutput(), + LinearizedPositionV1(), + MuaEventsV1(), + PositionGroup(), PositionOutput(), + RippleLFPSelection(), + RippleTimesV1(), + SortedSpikesGroup(), + SortedSpikesIndicatorSelection(), SpikeSortingOutput(), + SpikeSortingRecordingView(), + UnitMarksIndicatorSelection(), ) @cached_property - def _merge_tables(self) -> Dict[str, dj.FreeTable]: - """Dict of merge tables downstream of self: {full_table_name: FreeTable}. + def _part_masters(self) -> set: + """Set of master tables downstream of self. - Cache of items in parents of self.descendants(as_objects=True). Both - descendant and parent must have the reserved primary key 'merge_id'. + Cache of masters in self.descendants(as_objects=True) with another + foreign key reference in the part. Used for delete_downstream_parts. 
""" self.connection.dependencies.load() - merge_tables = {} - visited = set() + part_masters = set() def search_descendants(parent): for desc in parent.descendants(as_objects=True): - if ( - MERGE_PK not in desc.heading.names - or not (master_name := get_master(desc.full_table_name)) - or master_name in merge_tables + if ( # Check if has master, is part + not (master := get_master(desc.full_table_name)) + # has other non-master parent + or not set(desc.parents()) - set([master]) + or master in part_masters # already in cache ): continue - master_ft = dj.FreeTable(self.connection, master_name) - if is_merge_table(master_ft): - merge_tables[master_name] = master_ft - if master_name not in visited: - visited.add(master_name) - search_descendants(master_ft) + if master not in part_masters: + part_masters.add(master) + search_descendants(dj.FreeTable(self.connection, master)) try: _ = search_descendants(self) except NetworkXError: try: # Attempt to import missing table - self._import_merge_tables() + self._import_part_masters() _ = search_descendants(self) except NetworkXError as e: table_name = "".join(e.args[0].split("`")[1:4]) raise ValueError(f"Please import {table_name} and try again.") logger.info( - f"Building merge cache for {self.camel_name}.\n\t" - + f"Found {len(merge_tables)} downstream merge tables" + f"Building part-parent cache for {self.camel_name}.\n\t" + + f"Found {len(part_masters)} downstream part tables" ) - return merge_tables - - @cached_property - def _merge_chains(self) -> OrderedDict[str, List[dj.FreeTable]]: - """Dict of chains to merges downstream of self + return part_masters - Format: {full_table_name: TableChains}. - - For each merge table found in _merge_tables, find the path from self to - merge via merge parts. If the path is valid, add it to the dict. Cache - prevents need to recompute whenever delete_downstream_merge is called - with a new restriction. To recompute, add `reload_cache=True` to - delete_downstream_merge call. + def _commit_downstream_delete(self, down_fts, start=None, **kwargs): """ - from spyglass.utils.dj_graph import TableChains # noqa F401 - - merge_chains = {} - for name, merge_table in self._merge_tables.items(): - chains = TableChains(self, merge_table) - if len(chains): - merge_chains[name] = chains - - # This is ordered by max_len of chain from self to merge, which assumes - # that the merge table with the longest chain is the most downstream. - # A more sophisticated approach would order by length from self to - # each merge part independently, but this is a good first approximation. - - return OrderedDict( - sorted( - merge_chains.items(), key=lambda x: x[1].max_len, reverse=True - ) - ) + Commit delete of downstream parts via down_fts. Logs with _log_delete. - def _get_chain(self, substring): - """Return chain from self to merge table with substring in name.""" - for name, chain in self._merge_chains.items(): - if substring.lower() in name: - return chain - raise ValueError(f"No chain found with '{substring}' in name.") + Used by both delete_downstream_parts and cautious_delete. + """ + start = start or time() - def _commit_merge_deletes( - self, merge_join_dict: Dict[str, List[QueryExpression]], **kwargs - ) -> None: - """Commit merge deletes. 
+ safemode = ( + dj.config.get("safemode", True) + if kwargs.get("safemode") is None + else kwargs["safemode"] + ) + _ = kwargs.pop("safemode", None) + + ran_deletes = True + if down_fts: + for down_ft in down_fts: + dj_logger.info( + f"Spyglass: Deleting {len(down_ft)} rows from " + + f"{down_ft.full_table_name}" + ) + if ( + self._test_mode + or not safemode + or user_choice("Commit deletes?", default="no") == "yes" + ): + for down_ft in down_fts: # safemode off b/c already checked + down_ft.delete(safemode=False, **kwargs) + else: + logger.info("Delete aborted.") + ran_deletes = False - Parameters - ---------- - merge_join_dict : Dict[str, List[QueryExpression]] - Dictionary of merge tables and their joins. Uses 'merge_id' primary - key to restrict delete. + self._log_delete(start, del_blob=down_fts if ran_deletes else None) - Extracted for use in cautious_delete and delete_downstream_merge.""" - for table_name, part_restr in merge_join_dict.items(): - table = self._merge_tables[table_name] - keys = [part.fetch(MERGE_PK, as_dict=True) for part in part_restr] - (table & keys).delete(**kwargs) + return ran_deletes - def delete_downstream_merge( + def delete_downstream_parts( self, restriction: str = None, dry_run: bool = True, reload_cache: bool = False, disable_warning: bool = False, - return_parts: bool = True, + return_graph: bool = False, + verbose: bool = False, **kwargs, - ) -> Union[List[QueryExpression], Dict[str, List[QueryExpression]]]: + ) -> List[dj.FreeTable]: """Delete downstream merge table entries associated with restriction. Requires caching of merge tables and links, which is slow on first call. @@ -402,72 +410,83 @@ def delete_downstream_merge( If True, reload merge cache. Default False. disable_warning : bool, optional If True, do not warn if no merge tables found. Default False. - return_parts : bool, optional - If True, return list of merge part entries to be deleted. Default + return_graph: bool, optional + If True, return RestrGraph object used to identify downstream + tables. Default False, return list of part FreeTables. True. If False, return dictionary of merge tables and their joins. + verbose : bool, optional + If True, call RestrGraph with verbose=True. Default False. **kwargs : Any Passed to datajoint.table.Table.delete. """ + from spyglass.utils.dj_graph import RestrGraph # noqa F401 + + start = time() + if reload_cache: - for attr in ["_merge_tables", "_merge_chains"]: - _ = self.__dict__.pop(attr, None) + _ = self.__dict__.pop("_part_masters", None) + _ = self._part_masters # load cache before loading graph restriction = restriction or self.restriction or True - merge_join_dict = {} - for name, chain in self._merge_chains.items(): - if join := chain.cascade(restriction, direction="down"): - merge_join_dict[name] = join + restr_graph = RestrGraph( + seed_table=self, + leaves={self.full_table_name: restriction}, + direction="down", + cascade=True, + verbose=verbose, + ) + + if return_graph: + return restr_graph + + down_fts = restr_graph.ft_from_list( + self._part_masters, sort_reverse=False + ) - if not merge_join_dict and not disable_warning: + if not down_fts and not disable_warning: logger.warning( - f"No merge deletes found w/ {self.camel_name} & " + f"No part deletes found w/ {self.camel_name} & " + f"{restriction}.\n\tIf this is unexpected, try importing " + " Merge table(s) and running with `reload_cache`." 
) if dry_run: - return merge_join_dict.values() if return_parts else merge_join_dict + return down_fts - self._commit_merge_deletes(merge_join_dict, **kwargs) + self._commit_downstream_delete(down_fts, start, **kwargs) - def ddm( - self, - restriction: str = None, - dry_run: bool = True, - reload_cache: bool = False, - disable_warning: bool = False, - return_parts: bool = True, - *args, - **kwargs, + def ddp( + self, *args, **kwargs ) -> Union[List[QueryExpression], Dict[str, List[QueryExpression]]]: - """Alias for delete_downstream_merge.""" - return self.delete_downstream_merge( - restriction=restriction, - dry_run=dry_run, - reload_cache=reload_cache, - disable_warning=disable_warning, - return_parts=return_parts, - *args, - **kwargs, - ) + """Alias for delete_downstream_parts.""" + return self.delete_downstream_parts(*args, **kwargs) # ---------------------------- cautious_delete ---------------------------- @cached_property def _delete_deps(self) -> List[Table]: - """List of tables required for delete permission check. + """List of tables required for delete permission and orphan checks. LabMember, LabTeam, and Session are required for delete permission. + common_nwbfile.schema.external is required for deleting orphaned + external files. IntervalList is required for deleting orphaned interval + lists. Used to delay import of tables until needed, avoiding circular imports. Each of these tables inheits SpyglassMixin. """ - from spyglass.common import LabMember, LabTeam, Session # noqa F401 + from spyglass.common import ( # noqa F401 + IntervalList, + LabMember, + LabTeam, + Session, + ) + from spyglass.common.common_nwbfile import schema # noqa F401 self._session_pk = Session.primary_key[0] self._member_pk = LabMember.primary_key[0] - return [LabMember, LabTeam, Session] + return [LabMember, LabTeam, Session, schema.external, IntervalList] def _get_exp_summary(self): """Get summary of experimenters for session(s), including NULL. @@ -483,7 +502,7 @@ def _get_exp_summary(self): Summary of experimenters for session(s). """ - Session = self._delete_deps[-1] + Session = self._delete_deps[2] SesExp = Session.Experimenter # Not called in delete permission check, only bare _get_exp_summary @@ -506,7 +525,7 @@ def _session_connection(self): """Path from Session table to self. False if no connection found.""" from spyglass.utils.dj_graph import TableChain # noqa F401 - connection = TableChain(parent=self._delete_deps[-1], child=self) + connection = TableChain(parent=self._delete_deps[2], child=self) return connection if connection.has_link else False @cached_property @@ -532,7 +551,7 @@ def _check_delete_permission(self) -> None: Permission denied because (a) Session has no experimenter, or (b) user is not on a team with Session experimenter(s). 
""" - LabMember, LabTeam, Session = self._delete_deps + LabMember, LabTeam, Session, _, _ = self._delete_deps dj_user = dj.config["database.user"] if dj_user in LabMember().admin: # bypass permission check for admin @@ -575,16 +594,14 @@ def _check_delete_permission(self) -> None: logger.info(f"Queueing delete for session(s):\n{sess_summary}") @cached_property - def _usage_table(self): + def _cautious_del_tbl(self): """Temporary inclusion for usage tracking.""" from spyglass.common.common_usage import CautiousDelete return CautiousDelete() - def _log_delete(self, start, merge_deletes=None, super_delete=False): + def _log_delete(self, start, del_blob=None, super_delete=False): """Log use of cautious_delete.""" - if isinstance(merge_deletes, QueryExpression): - merge_deletes = merge_deletes.fetch(as_dict=True) safe_insert = dict( duration=time() - start, dj_user=dj.config["database.user"], @@ -593,21 +610,23 @@ def _log_delete(self, start, merge_deletes=None, super_delete=False): restr_str = "Super delete: " if super_delete else "" restr_str += "".join(self.restriction) if self.restriction else "None" try: - self._usage_table.insert1( + self._cautious_del_tbl.insert1( dict( **safe_insert, restriction=restr_str[:255], - merge_deletes=merge_deletes, + merge_deletes=del_blob, ) ) except (DataJointError, DataError): - self._usage_table.insert1( + self._cautious_del_tbl.insert1( dict(**safe_insert, restriction="Unknown") ) # TODO: Intercept datajoint delete confirmation prompt for merge deletes - def cautious_delete(self, force_permission: bool = False, *args, **kwargs): - """Delete table rows after checking user permission. + def cautious_delete( + self, force_permission: bool = False, dry_run=False, *args, **kwargs + ): + """Permission check, then delete potential orphans and table rows. Permission is granted to users listed as admin in LabMember table or to users on a team with with the Session experimenter(s). If the table @@ -615,56 +634,61 @@ def cautious_delete(self, force_permission: bool = False, *args, **kwargs): continues. If the Session has no experimenter, or if the user is not on a team with the Session experimenter(s), a PermissionError is raised. + Potential downstream orphans are deleted first. These are master tables + whose parts have foreign keys to descendants of self. Then, rows from + self are deleted. Last, Nwbfile and IntervalList externals are deleted. + Parameters ---------- force_permission : bool, optional Bypass permission check. Default False. + dry_run : bool, optional + Default False. If True, return items to be deleted as + Tuple[Upstream, Downstream, externals['raw'], externals['analysis']] + If False, delete items. *args, **kwargs : Any Passed to datajoint.table.Table.delete. 
""" start = time() + external, IntervalList = self._delete_deps[3], self._delete_deps[4] - if not force_permission: + if not force_permission or dry_run: self._check_delete_permission() - merge_deletes = self.delete_downstream_merge( + down_fts = self.delete_downstream_parts( dry_run=True, disable_warning=True, - return_parts=False, ) - safemode = ( - dj.config.get("safemode", True) - if kwargs.get("safemode") is None - else kwargs["safemode"] - ) + if dry_run: + return ( + down_fts, + IntervalList(), # cleanup func relies on downstream deletes + external["raw"].unused(), + external["analysis"].unused(), + ) - if merge_deletes: - for table, content in merge_deletes.items(): - count = sum([len(part) for part in content]) - dj_logger.info(f"Merge: Deleting {count} rows from {table}") - if ( - not self._test_mode - or not safemode - or user_choice("Commit deletes?", default="no") == "yes" - ): - self._commit_merge_deletes(merge_deletes, **kwargs) - else: - logger.info("Delete aborted.") - self._log_delete(start) - return + if not self._commit_downstream_delete(down_fts, start=start, **kwargs): + return # Abort delete based on user input + + super().delete(*args, **kwargs) # Confirmation here - super().delete(*args, **kwargs) # Additional confirm here + for ext_type in ["raw", "analysis"]: + external[ext_type].delete( + delete_external_files=True, display_progress=False + ) - self._log_delete(start=start, merge_deletes=merge_deletes) + _ = IntervalList().nightly_cleanup(dry_run=False) - def cdel(self, force_permission=False, *args, **kwargs): + self._log_delete(start=start, del_blob=down_fts) + + def cdel(self, *args, **kwargs): """Alias for cautious_delete.""" - self.cautious_delete(force_permission=force_permission, *args, **kwargs) + return self.cautious_delete(*args, **kwargs) - def delete(self, force_permission=False, *args, **kwargs): + def delete(self, *args, **kwargs): """Alias for cautious_delete, overwrites datajoint.table.Table.delete""" - self.cautious_delete(force_permission=force_permission, *args, **kwargs) + self.cautious_delete(*args, **kwargs) def super_delete(self, warn=True, *args, **kwargs): """Alias for datajoint.table.Table.delete.""" @@ -708,7 +732,7 @@ def populate(self, *restrictions, **kwargs): @cached_property def _spyglass_version(self): - """Get Spyglass version from dj.config.""" + """Get Spyglass version.""" from spyglass import __version__ as sg_version return ".".join(sg_version.split(".")[:3]) # Major.Minor.Patch @@ -905,8 +929,8 @@ def restrict_by( Returns ------- - Union[QueryExpression, FindKeyGraph] - Restricted version of present table or FindKeyGraph object. If + Union[QueryExpression, TableChain] + Restricted version of present table or TableChain object. If return_graph, use all_ft attribute to see all tables in cascade. """ from spyglass.utils.dj_graph import TableChain # noqa: F401 @@ -917,6 +941,8 @@ def restrict_by( try: ret = self.restrict(restriction) # Save time trying first if len(ret) < len(self): + # If it actually restricts, if not it might by a dict that + # is not a valid restriction, returned as True logger.warning("Restriction valid for this table. 
Using as is.") return ret except DataJointError: @@ -936,21 +962,26 @@ def restrict_by( direction=direction, search_restr=restriction, banned_tables=list(self._banned_search_tables), - allow_merge=True, cascade=True, verbose=verbose, **kwargs, ) + if not graph.found_restr: + return None + if return_graph: return graph ret = self & graph._get_restr(self.full_table_name) - if len(ret) == len(self) or len(ret) == 0: - logger.warning( - f"Failed to restrict with path: {graph.path_str}\n\t" - + "See `help(YourTable.restrict_by)`" - ) + warn_text = ( + f" after restrict with path: {graph.path_str}\n\t " + + "See `help(YourTable.restrict_by)`" + ) + if len(ret) == len(self): + logger.warning("Same length" + warn_text) + elif len(ret) == 0: + logger.warning("No entries" + warn_text) return ret diff --git a/tests/common/test_usage.py b/tests/common/test_usage.py new file mode 100644 index 000000000..71449b3e3 --- /dev/null +++ b/tests/common/test_usage.py @@ -0,0 +1,89 @@ +import pytest + + +@pytest.fixture(scope="session") +def export_tbls(common): + from spyglass.common.common_usage import Export, ExportSelection + + return ExportSelection(), Export() + + +@pytest.fixture(scope="session") +def gen_export_selection( + lfp, trodes_pos_v1, track_graph, export_tbls, populate_lfp +): + ExportSelection, _ = export_tbls + _ = populate_lfp + + ExportSelection.start_export(paper_id=1, analysis_id=1) + lfp.v1.LFPV1().fetch_nwb() + trodes_pos_v1.fetch() + ExportSelection.start_export(paper_id=1, analysis_id=2) + track_graph.fetch() + ExportSelection.stop_export() + + yield dict(paper_id=1) + + ExportSelection.stop_export() + ExportSelection.super_delete(warn=False, safemode=False) + + +def test_export_selection_files(gen_export_selection, export_tbls): + ExportSelection, _ = export_tbls + paper_key = gen_export_selection + + len_fi = len(ExportSelection * ExportSelection.File & paper_key) + assert len_fi == 1, "Selection files not captured correctly" + + +def test_export_selection_tables(gen_export_selection, export_tbls): + ExportSelection, _ = export_tbls + paper_key = gen_export_selection + + paper = ExportSelection * ExportSelection.Table & paper_key + len_tbl_1 = len(paper & dict(analysis_id=1)) + len_tbl_2 = len(paper & dict(analysis_id=2)) + assert len_tbl_1 == 2, "Selection tables not captured correctly" + assert len_tbl_2 == 1, "Selection tables not captured correctly" + + +def tests_export_selection_max_id(gen_export_selection, export_tbls): + ExportSelection, _ = export_tbls + _ = gen_export_selection + + exp_id = max(ExportSelection.fetch("export_id")) + got_id = ExportSelection._max_export_id(1) + assert exp_id == got_id, "Max export id not captured correctly" + + +@pytest.fixture(scope="session") +def populate_export(export_tbls, gen_export_selection): + _, Export = export_tbls + Export.populate_paper(**gen_export_selection) + key = (Export & gen_export_selection).fetch("export_id", as_dict=True) + + yield (Export.Table & key), (Export.File & key) + + Export.super_delete(warn=False, safemode=False) + + +def test_export_populate(populate_export): + table, file = populate_export + + assert len(file) == 4, "Export tables not captured correctly" + assert len(table) == 31, "Export files not captured correctly" + + +def test_invalid_export_id(export_tbls): + ExportSelection, _ = export_tbls + ExportSelection.start_export(paper_id=2, analysis_id=1) + with pytest.raises(RuntimeError): + ExportSelection.export_id = 99 + ExportSelection.stop_export() + + +def test_del_export_id(export_tbls): + 
ExportSelection, _ = export_tbls + ExportSelection.start_export(paper_id=2, analysis_id=1) + del ExportSelection.export_id + assert ExportSelection.export_id == 0, "Export id not reset correctly" diff --git a/tests/utils/conftest.py b/tests/utils/conftest.py index a4bc7f900..726b6b8a7 100644 --- a/tests/utils/conftest.py +++ b/tests/utils/conftest.py @@ -30,23 +30,14 @@ def schema_test(teardown, dj_conn): @pytest.fixture(scope="module") -def chains(Nwbfile): - """Return example TableChains object from Nwbfile.""" - from spyglass.lfp.lfp_merge import LFPOutput # noqa: F401 +def chain(Nwbfile): + """Return example TableChain object from chains.""" from spyglass.linearization.merge import ( LinearizedPositionOutput, ) # noqa: F401 - from spyglass.position.position_merge import PositionOutput # noqa: F401 - - _ = LFPOutput, LinearizedPositionOutput, PositionOutput - - yield Nwbfile._get_chain("linear") - + from spyglass.utils.dj_graph import TableChain -@pytest.fixture(scope="module") -def chain(chains): - """Return example TableChain object from chains.""" - yield chains[0] + yield TableChain(Nwbfile, LinearizedPositionOutput) @pytest.fixture(scope="module") diff --git a/tests/utils/test_chains.py b/tests/utils/test_chains.py index 66d9772c3..093ed5485 100644 --- a/tests/utils/test_chains.py +++ b/tests/utils/test_chains.py @@ -13,27 +13,6 @@ def full_to_camel(t): return to_camel_case(t.split(".")[-1].strip("`")) -def test_chains_repr(chains): - """Test that the repr of a TableChains object is as expected.""" - repr_got = repr(chains) - chain_st = ",\n\t".join([str(c) for c in chains.chains]) + "\n" - repr_exp = f"TableChains(\n\t{chain_st})" - assert repr_got == repr_exp, "Unexpected repr of TableChains object." - - -def test_str_getitem(chains): - """Test getitem of TableChains object.""" - by_int = chains[0] - by_str = chains[chains.part_names[0]] - assert by_int == by_str, "Getitem by int and str not equal." - - -def test_invalid_chain(Nwbfile, pos_merge_tables, TableChain): - """Test that an invalid chain raises an error.""" - with pytest.raises(TypeError): - TableChain(Nwbfile, pos_merge_tables[0]) - - def test_chain_str(chain): """Test that the str of a TableChain object is as expected.""" chain = chain @@ -64,8 +43,8 @@ def test_chain_len(chain): def test_chain_getitem(chain): """Test getitem of TableChain object.""" - by_int = chain[0] - by_str = chain[chain.path[0]] + by_int = str(chain[0]) + by_str = str(chain[chain.restr_ft[0].full_table_name]) assert by_int == by_str, "Getitem by int and str not equal." @@ -76,3 +55,9 @@ def test_nolink_join(no_link_chain): def test_chain_str_no_link(no_link_chain): """Test that the str of a TableChain object with no link is as expected.""" assert str(no_link_chain) == "No link", "Unexpected str of no link chain." + assert repr(no_link_chain) == "No link", "Unexpected repr of no link chain." 
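A minimal sketch of how the refactored TableChain from the `chain` fixture above can be used directly, assuming a configured Spyglass database; the table pair simply mirrors tests/utils/conftest.py:

    # Illustrative sketch, not part of the patch; assumes a working Spyglass install.
    from spyglass.common import Nwbfile
    from spyglass.linearization.merge import LinearizedPositionOutput
    from spyglass.utils.dj_graph import TableChain

    chain = TableChain(Nwbfile(), LinearizedPositionOutput())
    print(str(chain))  # "Nwbfile -> LinearizedPositionOutput" if a path is found
    print(len(chain))  # number of tables along the path; 0 when there is no link

    if chain.has_link:
        # Propagate a restriction from parent to child and return the restricted
        # FreeTable at the far end (True = no restriction, for illustration only).
        restricted_child = chain.cascade(restriction=True, direction="down")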
+ + +def test_invalid_chain(TableChain): + with pytest.raises(ValueError): + TableChain() diff --git a/tests/utils/test_graph.py b/tests/utils/test_graph.py index 18899e147..c51427810 100644 --- a/tests/utils/test_graph.py +++ b/tests/utils/test_graph.py @@ -1,4 +1,7 @@ import pytest +from datajoint.utils import to_camel_case + +from tests.conftest import VERBOSE @pytest.fixture(scope="session") @@ -14,8 +17,7 @@ def restr_graph(leaf, verbose, lin_merge_key): yield RestrGraph( seed_table=leaf, - table_name=leaf.full_table_name, - restriction=True, + leaves={leaf.full_table_name: True}, cascade=True, verbose=verbose, ) @@ -26,13 +28,19 @@ def test_rg_repr(restr_graph, leaf): repr_got = repr(restr_graph) assert "cascade" in repr_got.lower(), "Cascade not in repr." - assert leaf.full_table_name in repr_got, "Table name not in repr." + + assert to_camel_case(leaf.table_name) in repr_got, "Table name not in repr." + + +def test_rg_len(restr_graph): + assert len(restr_graph) == len( + restr_graph.restr_ft + ), "Unexpected length of RestrGraph." def test_rg_ft(restr_graph): """Test FreeTable attribute of RestrGraph.""" assert len(restr_graph.leaf_ft) == 1, "Unexpected # of leaf tables." - assert len(restr_graph["spatial"]) == 2, "Unexpected cascaded table length." def test_rg_restr_ft(restr_graph): @@ -43,8 +51,41 @@ def test_rg_restr_ft(restr_graph): def test_rg_file_paths(restr_graph): """Test collection of upstream file paths.""" - paths = [p.get("file_path") for p in restr_graph.file_paths] - assert len(paths) == 2, "Unexpected number of file paths." + assert len(restr_graph.file_paths) == 2, "Unexpected number of file paths." + + +def test_rg_invalid_table(restr_graph): + """Test that an invalid table raises an error.""" + with pytest.raises(ValueError): + restr_graph._get_node("invalid_table") + + +def test_rg_invalid_edge(restr_graph, Nwbfile, common): + """Test that an invalid edge raises an error.""" + with pytest.raises(ValueError): + restr_graph._get_edge(Nwbfile, common.common_behav.PositionSource) + + +def test_rg_restr_subset(restr_graph, leaf): + prev_ft = restr_graph._get_ft(leaf.full_table_name, with_restr=True) + + restr_graph._set_restr(leaf, restriction=False) + + new_ft = restr_graph._get_ft(leaf.full_table_name, with_restr=True) + assert len(prev_ft) == len(new_ft), "Subset sestriction changed length." + + +@pytest.mark.skipif(not VERBOSE, reason="No logging to test when quiet-spy") +def test_rg_no_restr(caplog, restr_graph, common): + restr_graph._set_restr(common.LabTeam, restriction=False) + restr_graph._get_ft(common.LabTeam.full_table_name, with_restr=True) + assert "No restr" in caplog.text, "No warning logged on no restriction." + + +def test_rg_invalid_direction(restr_graph, leaf): + """Test that an invalid direction raises an error.""" + with pytest.raises(ValueError): + restr_graph._get_next_tables(leaf.full_table_name, "invalid_direction") @pytest.fixture(scope="session") @@ -79,8 +120,7 @@ def restr_graph_root(restr_graph, common, lfp_band, lin_v1, frequent_imports): yield RestrGraph( seed_table=common.Session(), - table_name=common.Session.full_table_name, - restriction="True", + leaves={common.Session.full_table_name: "True"}, direction="down", cascade=True, verbose=False, @@ -89,7 +129,7 @@ def restr_graph_root(restr_graph, common, lfp_band, lin_v1, frequent_imports): def test_rg_root(restr_graph_root): assert ( - len(restr_graph_root["trodes_pos_v1"]) == 2 + len(restr_graph_root["trodes_pos_v1"]) >= 1 ), "Incomplete cascade from root." 
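The fixtures above use the new RestrGraph signature, which takes a `leaves` mapping of full table names to restrictions in place of the old `table_name`/`restriction` arguments. A minimal sketch of the same call pattern, assuming a configured Spyglass database (the "True" restriction is a placeholder):

    # Illustrative sketch, not part of the patch; assumes a working Spyglass install.
    from spyglass.common import Session
    from spyglass.utils.dj_graph import RestrGraph

    graph = RestrGraph(
        seed_table=Session(),
        leaves={Session.full_table_name: "True"},  # {full_table_name: restriction}
        direction="down",
        cascade=True,
        verbose=False,
    )
    print(len(graph))        # number of restricted FreeTables (restr_ft)
    print(graph.file_paths)  # analysis file paths collected during the cascade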
@@ -125,6 +165,52 @@ def test_restr_from_downstream(graph_tables, table, restr, expect_n, msg): assert len(graph_tables[table]() << restr) == expect_n, msg +@pytest.mark.skipif(not VERBOSE, reason="No logging to test when quiet-spy.") +def test_ban_node(caplog, graph_tables): + search_restr = "sk_attr > 17" + ParentNode = graph_tables["ParentNode"]() + SkNode = graph_tables["SkNode"]() + + ParentNode.ban_search_table(SkNode) + ParentNode >> search_restr + assert "could not be applied" in caplog.text, "Found banned table." + + ParentNode.see_banned_tables() + assert "Banned tables" in caplog.text, "Banned tables not logged." + + ParentNode.unban_search_table(SkNode) + assert len(ParentNode >> search_restr) == 3, "Unban failed." + + +def test_null_restrict_by(graph_tables): + PkNode = graph_tables["PkNode"]() + assert (PkNode >> True) == PkNode, "Null restriction failed." + + +@pytest.mark.skipif(not VERBOSE, reason="No logging to test when quiet-spy.") +def test_restrict_by_this_table(caplog, graph_tables): + PkNode = graph_tables["PkNode"]() + PkNode >> "pk_id > 4" + assert "valid for" in caplog.text, "No warning logged without search." + + +def test_invalid_restr_direction(graph_tables): + PkNode = graph_tables["PkNode"]() + with pytest.raises(ValueError): + PkNode.restrict_by("bad_attr > 0", direction="invalid_direction") + + +@pytest.mark.skipif(not VERBOSE, reason="No logging to test when quiet-spy.") +def test_warn_nonrestrict(caplog, graph_tables): + ParentNode = graph_tables["ParentNode"]() + restr_parent = ParentNode & "parent_id > 4 AND parent_id < 9" + + restr_parent >> "sk_id > 0" + assert "Same length" in caplog.text, "No warning logged on non-restrict." + restr_parent >> "sk_id > 99" + assert "No entries" in caplog.text, "No warning logged on non-restrict." + + def test_restr_many_to_one(graph_tables_many_to_one): PK = graph_tables_many_to_one["PkSkNode"]() OP = graph_tables_many_to_one["OtherParentNode"]() @@ -139,7 +225,35 @@ def test_restr_many_to_one(graph_tables_many_to_one): ), "Error accepting list of dicts for `>>` for many to one." -def test_restr_invalid(graph_tables): +def test_restr_invalid_err(graph_tables): PkNode = graph_tables["PkNode"]() with pytest.raises(ValueError): len(PkNode << set(["parent_attr > 15", "parent_attr < 20"])) + + +@pytest.mark.skipif(not VERBOSE, reason="No logging to test when quiet-spy.") +def test_restr_invalid(caplog, graph_tables): + graph_tables["PkNode"]() << "invalid_restr=1" + assert ( + "could not be applied" in caplog.text + ), "No warning logged on invalid restr." + + +@pytest.fixture(scope="session") +def direction(): + from spyglass.utils.dj_graph import Direction + + yield Direction + + +def test_direction_str(direction): + assert str(direction.UP) == "up", "Direction str not as expected." + + +def test_direction_invert(direction): + assert ~direction.UP == direction("down"), "Direction inversion failed." + + +def test_direction_bool(direction): + assert bool(direction.UP), "Direction bool not as expected." + assert not direction.NONE, "Direction bool not as expected." diff --git a/tests/utils/test_merge.py b/tests/utils/test_merge.py index 9c192c20a..2876555a1 100644 --- a/tests/utils/test_merge.py +++ b/tests/utils/test_merge.py @@ -35,6 +35,25 @@ def test_nwb_table_missing(BadMerge, caplog, schema_test): assert "non-default definition" in txt, "Warning not caught." 
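The test_graph.py additions above exercise `restrict_by` through the `>>` (search downstream) and `<<` (search upstream) operators on SpyglassMixin tables. A rough sketch of that usage against the toy schema built by the `graph_tables` fixture; attribute names and thresholds are taken from the tests, and results depend on the fixture data:

    # Illustrative sketch, not part of the patch; tables come from the
    # graph_tables fixture defined for these tests.
    ParentNode = graph_tables["ParentNode"]()
    PkNode = graph_tables["PkNode"]()

    # '>>' searches downstream for a table where the restriction applies, then
    # returns ParentNode restricted to entries linked to the matching rows.
    down = ParentNode >> "sk_attr > 17"

    # '<<' performs the same search in the upstream direction.
    up = PkNode << "parent_attr > 15"

    # Long form, returning the TableChain used for the search instead:
    graph = ParentNode.restrict_by(
        "sk_attr > 17", direction="down", return_graph=True
    )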
+@pytest.fixture(scope="function") +def NonMerge(): + from spyglass.utils import SpyglassMixin + + class NonMerge(SpyglassMixin, dj.Manual): + definition = """ + merge_id : uuid + --- + source : varchar(32) + """ + + yield NonMerge + + +def test_non_merge(NonMerge): + with pytest.raises(AttributeError): + NonMerge() + + def test_part_camel(merge_table): example_part = merge_table.parts(camel_case=True)[0] assert "_" not in example_part, "Camel case not applied." diff --git a/tests/utils/test_mixin.py b/tests/utils/test_mixin.py index 93d13407a..a35041013 100644 --- a/tests/utils/test_mixin.py +++ b/tests/utils/test_mixin.py @@ -8,10 +8,11 @@ def Mixin(): from spyglass.utils import SpyglassMixin - class Mixin(SpyglassMixin, dj.Manual): + class Mixin(SpyglassMixin, dj.Lookup): definition = """ id : int """ + contents = [(0,), (1,)] yield Mixin @@ -32,64 +33,92 @@ def test_nwb_table_missing(schema_test, Mixin): Mixin().fetch_nwb() -def test_merge_detect(Nwbfile, pos_merge_tables): +def test_auto_increment(schema_test, Mixin): + schema_test(Mixin) + ret = Mixin()._auto_increment(key={}, pk="id") + assert ret["id"] == 2, "Auto increment not working." + + +def test_null_file_like(schema_test, Mixin): + schema_test(Mixin) + ret = Mixin().file_like(None) + assert len(ret) == len(Mixin()), "Null file_like not working." + + +@pytest.mark.skipif(not VERBOSE, reason="No logging to test when quiet-spy.") +def test_bad_file_like(caplog, schema_test, Mixin): + schema_test(Mixin) + Mixin().file_like("BadName") + assert "No file_like field" in caplog.text, "No warning issued." + + +def test_partmaster_detect(Nwbfile, pos_merge_tables): """Test that the mixin can detect merge children of merge.""" - merges_found = set(Nwbfile._merge_chains.keys()) - merges_expected = set([t.full_table_name for t in pos_merge_tables]) - assert merges_expected.issubset( - merges_found - ), "Merges not detected by mixin." + assert len(Nwbfile._part_masters) >= 14, "Part masters not detected." -def test_merge_chain_join( - Nwbfile, pos_merge_tables, lin_v1, lfp_merge_key, populate_dlc +def test_downstream_restrict( + Nwbfile, frequent_imports, pos_merge_tables, lin_v1, lfp_merge_key ): - """Test that the mixin can join merge chains. + """Test that the mixin can join merge chains.""" - NOTE: This will change if more data is added to merge tables.""" - _ = lin_v1, lfp_merge_key, populate_dlc # merge tables populated + _ = frequent_imports # graph for cascade + _ = lin_v1, lfp_merge_key # merge tables populated - all_chains = [ - chains.cascade(True, direction="down") - for chains in Nwbfile._merge_chains.values() - ] - end_len = [len(chain) for chain in all_chains] + restr_ddp = Nwbfile.ddp(dry_run=True, reload_cache=True) + end_len = [len(ft) for ft in restr_ddp] - assert sum(end_len) >= 3, "Merge chains not joined correctly." + assert sum(end_len) >= 8, "Downstream parts not restricted correctly." -def test_get_chain(Nwbfile, pos_merge_tables): +def test_get_downstream_merge(Nwbfile, pos_merge_tables): """Test that the mixin can get the chain of a merge.""" - lin_parts = Nwbfile._get_chain("linear").part_names - lin_output = pos_merge_tables[1] - assert lin_parts == lin_output.parts(), "Chain not found." + lin_output = pos_merge_tables[1].full_table_name + assert lin_output in Nwbfile._part_masters, "Merge not found." 
@pytest.mark.skipif(not VERBOSE, reason="No logging to test when quiet-spy.") -def test_ddm_warning(Nwbfile, caplog): +def test_ddp_warning(Nwbfile, caplog): """Test that the mixin warns on empty delete_downstream_merge.""" - (Nwbfile.file_like("BadName")).delete_downstream_merge( + (Nwbfile.file_like("BadName")).delete_downstream_parts( reload_cache=True, disable_warnings=False ) - assert "No merge deletes found" in caplog.text, "No warning issued." + assert "No part deletes found" in caplog.text, "No warning issued." -def test_ddm_dry_run(Nwbfile, common, sgp, pos_merge_tables, lin_v1): +def test_ddp_dry_run( + Nwbfile, frequent_imports, common, sgp, pos_merge_tables, lin_v1 +): """Test that the mixin can dry run delete_downstream_merge.""" _ = lin_v1 # merge tables populated + _ = frequent_imports # graph for cascade + pos_output_name = pos_merge_tables[0].full_table_name param_field = "trodes_pos_params_name" trodes_params = sgp.v1.TrodesPosParams() - rft = (trodes_params & f'{param_field} LIKE "%ups%"').ddm( - reload_cache=True, dry_run=True, return_parts=False - )[pos_output_name][0] - assert len(rft) == 1, "ddm did not return restricted table." + rft = [ + table + for table in (trodes_params & f'{param_field} LIKE "%ups%"').ddp( + reload_cache=True, dry_run=True + ) + if table.full_table_name == pos_output_name + ] + assert len(rft) == 1, "ddp did not return restricted table." + + +def test_exp_summary(Nwbfile): + fields = Nwbfile._get_exp_summary().heading.names + expected = ["nwb_file_name", "lab_member_name"] + assert fields == expected, "Exp summary fields not as expected." - table_name = [p for p in pos_merge_tables[0].parts() if "trode" in p][0] - assert table_name == rft.full_table_name, "ddm didn't grab right table." - assert ( - rft.fetch1(param_field) == "single_led_upsampled" - ), "ddm didn't grab right row." +def test_cautious_del_dry_run(Nwbfile, frequent_imports): + _ = frequent_imports # part of cascade, need import + ret = Nwbfile.cdel(dry_run=True) + part_master_names = [t.full_table_name for t in ret[0]] + part_masters = Nwbfile._part_masters + assert all( + [pm in part_masters for pm in part_master_names] + ), "Non part masters found in cautious delete dry run." 
From 3ba5d0a50bee913e62ed15890b2a3515c469bc37 Mon Sep 17 00:00:00 2001 From: Samuel Bray Date: Thu, 27 Jun 2024 08:48:32 -0700 Subject: [PATCH 59/60] remove kachery_client dependency (#1014) --- CHANGELOG.md | 1 + .../v0/figurl_views/SpikeSortingRecordingView.py | 7 ++----- .../spikesorting/v0/figurl_views/SpikeSortingView.py | 4 ++-- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0092789c2..c304baf07 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,6 +61,7 @@ PositionIntervalMap.alter() - Remove unused `UnitInclusionParameters` table from `spikesorting.v0` #1003 - Fix bug in identification of artifact samples to be zeroed out in `spikesorting.v1.SpikeSorting` #1009 + - Remove deprecated dependencies on kachery_client #1014 ## [0.5.2] (April 22, 2024) diff --git a/src/spyglass/spikesorting/v0/figurl_views/SpikeSortingRecordingView.py b/src/spyglass/spikesorting/v0/figurl_views/SpikeSortingRecordingView.py index 0c0e4a9e1..aedb83522 100644 --- a/src/spyglass/spikesorting/v0/figurl_views/SpikeSortingRecordingView.py +++ b/src/spyglass/spikesorting/v0/figurl_views/SpikeSortingRecordingView.py @@ -2,7 +2,7 @@ from typing import List, Union import datajoint as dj -import kachery_client as kc +import kachery_cloud as kcl import numpy as np import spikeinterface as si from sortingview.SpikeSortingView import create_raw_traces_plot @@ -103,9 +103,6 @@ def create_mountain_layout( def _upload_data_and_return_sha1(data): - data_uri = kc.store_json(data) + data_uri = kcl.store_json(data) data_hash = data_uri.split("/")[2] - kc.upload_file( - data_uri, channel=os.environ["FIGURL_CHANNEL"], single_chunk=True - ) return data_hash diff --git a/src/spyglass/spikesorting/v0/figurl_views/SpikeSortingView.py b/src/spyglass/spikesorting/v0/figurl_views/SpikeSortingView.py index d05b61f1a..45d498565 100644 --- a/src/spyglass/spikesorting/v0/figurl_views/SpikeSortingView.py +++ b/src/spyglass/spikesorting/v0/figurl_views/SpikeSortingView.py @@ -1,5 +1,5 @@ import datajoint as dj -import kachery_client as kc +import kachery_cloud as kcl import spikeinterface as si from sortingview.SpikeSortingView import ( SpikeSortingView as SortingViewSpikeSortingView, @@ -38,7 +38,7 @@ def make(self, key): recording: si.BaseRecording = si.load_extractor(recording_path) sorting: si.BaseSorting = si.load_extractor(sorting_path) - with kc.TemporaryDirectory() as tmpdir: + with kcl.TemporaryDirectory() as tmpdir: fname = f"{tmpdir}/spikesortingview.h5" logger.info("Preparing spikesortingview data") prepare_spikesortingview_data( From 14daa3b4c5e33e5989381a64a65a3b456ad26e46 Mon Sep 17 00:00:00 2001 From: Sam Bray Date: Mon, 1 Jul 2024 14:31:20 -0700 Subject: [PATCH 60/60] add default config to arguments --- src/spyglass/common/common_device.py | 6 +++--- src/spyglass/common/common_lab.py | 6 +++--- src/spyglass/common/common_session.py | 4 ++-- src/spyglass/common/common_subject.py | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/spyglass/common/common_device.py b/src/spyglass/common/common_device.py index 30916bebc..ea71e4b3e 100644 --- a/src/spyglass/common/common_device.py +++ b/src/spyglass/common/common_device.py @@ -36,7 +36,7 @@ class DataAcquisitionDevice(SpyglassMixin, dj.Manual): """ @classmethod - def insert_from_nwbfile(cls, nwbf, config): + def insert_from_nwbfile(cls, nwbf, config={}): """Insert data acquisition devices from an NWB file. Note that this does not link the DataAcquisitionDevices with a Session. 
@@ -252,7 +252,7 @@ class CameraDevice(SpyglassMixin, dj.Manual): """ @classmethod - def insert_from_nwbfile(cls, nwbf, config): + def insert_from_nwbfile(cls, nwbf, config={}): """Insert camera devices from an NWB file Parameters @@ -356,7 +356,7 @@ class Electrode(SpyglassMixin, dj.Part): """ @classmethod - def insert_from_nwbfile(cls, nwbf, config): + def insert_from_nwbfile(cls, nwbf, config={}): """Insert probe devices from an NWB file. Parameters diff --git a/src/spyglass/common/common_lab.py b/src/spyglass/common/common_lab.py index a11f2c221..be847d79c 100644 --- a/src/spyglass/common/common_lab.py +++ b/src/spyglass/common/common_lab.py @@ -42,7 +42,7 @@ class LabMemberInfo(SpyglassMixin, dj.Part): _admin = [] @classmethod - def insert_from_nwbfile(cls, nwbf, config): + def insert_from_nwbfile(cls, nwbf, config={}): """Insert lab member information from an NWB file. Parameters @@ -245,7 +245,7 @@ class Institution(SpyglassMixin, dj.Manual): """ @classmethod - def insert_from_nwbfile(cls, nwbf, config): + def insert_from_nwbfile(cls, nwbf, config={}): """Insert institution information from an NWB file. Parameters @@ -284,7 +284,7 @@ class Lab(SpyglassMixin, dj.Manual): """ @classmethod - def insert_from_nwbfile(cls, nwbf, config): + def insert_from_nwbfile(cls, nwbf, config={}): """Insert lab name information from an NWB file. Parameters diff --git a/src/spyglass/common/common_session.py b/src/spyglass/common/common_session.py index 2ed33cb22..893b727b5 100644 --- a/src/spyglass/common/common_session.py +++ b/src/spyglass/common/common_session.py @@ -130,7 +130,7 @@ def make(self, key): self._add_data_acquisition_device_part(nwb_file_name, nwbf, config) self._add_experimenter_part(nwb_file_name, nwbf, config) - def _add_data_acquisition_device_part(self, nwb_file_name, nwbf, config): + def _add_data_acquisition_device_part(self, nwb_file_name, nwbf, config={}): # get device names from both the NWB file and the associated config file device_names, _, _ = DataAcquisitionDevice.get_all_device_names( nwbf, config @@ -152,7 +152,7 @@ def _add_data_acquisition_device_part(self, nwb_file_name, nwbf, config): key["data_acquisition_device_name"] = device_name Session.DataAcquisitionDevice.insert1(key) - def _add_experimenter_part(self, nwb_file_name, nwbf, config): + def _add_experimenter_part(self, nwb_file_name, nwbf, config={}): # Use config file over nwb file if members := config.get("LabMember"): experimenter_list = [ diff --git a/src/spyglass/common/common_subject.py b/src/spyglass/common/common_subject.py index 1b9355713..a6320f7e5 100644 --- a/src/spyglass/common/common_subject.py +++ b/src/spyglass/common/common_subject.py @@ -18,7 +18,7 @@ class Subject(SpyglassMixin, dj.Manual): """ @classmethod - def insert_from_nwbfile(cls, nwbf, config): + def insert_from_nwbfile(cls, nwbf, config={}): """Get the subject info from the NWBFile, insert into the Subject. Parameters