From 1d4eaaf229c9a7222cd4aec1a1bcf8b6ad37808c Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Thu, 31 Oct 2024 16:43:20 -0400 Subject: [PATCH] Run ruff --fix. --- .circleci/get_data.py | 2 +- .maint/update_authors.py | 112 ++--- aslprep/__about__.py | 14 +- aslprep/__init__.py | 8 +- aslprep/__main__.py | 6 +- aslprep/_warnings.py | 6 +- aslprep/cli/aggregate_qc.py | 17 +- aslprep/cli/parser.py | 556 ++++++++++++------------ aslprep/cli/run.py | 78 ++-- aslprep/cli/version.py | 14 +- aslprep/cli/workflow.py | 108 ++--- aslprep/config.py | 214 ++++----- aslprep/data/__init__.py | 20 +- aslprep/interfaces/__init__.py | 18 +- aslprep/interfaces/ants.py | 24 +- aslprep/interfaces/bids.py | 46 +- aslprep/interfaces/cbf.py | 412 +++++++++--------- aslprep/interfaces/confounds.py | 414 +++++++++--------- aslprep/interfaces/parcellation.py | 56 +-- aslprep/interfaces/plotting.py | 72 +-- aslprep/interfaces/reference.py | 46 +- aslprep/interfaces/reports.py | 116 ++--- aslprep/interfaces/utility.py | 112 ++--- aslprep/tests/conftest.py | 44 +- aslprep/tests/run_local_tests.py | 46 +- aslprep/tests/test_cli.py | 368 ++++++++-------- aslprep/tests/test_interfaces_cbf.py | 172 ++++---- aslprep/tests/test_parser.py | 68 +-- aslprep/tests/test_version.py | 52 +-- aslprep/tests/testing.py | 10 +- aslprep/tests/tests.py | 12 +- aslprep/tests/utils.py | 122 +++--- aslprep/utils/__init__.py | 16 +- aslprep/utils/asl.py | 70 +-- aslprep/utils/atlas.py | 60 +-- aslprep/utils/bids.py | 287 ++++++------ aslprep/utils/cbf.py | 64 +-- aslprep/utils/confounds.py | 24 +- aslprep/utils/misc.py | 31 +- aslprep/utils/plotting.py | 46 +- aslprep/utils/sentry.py | 102 ++--- aslprep/workflows/__init__.py | 4 +- aslprep/workflows/asl/__init__.py | 20 +- aslprep/workflows/asl/apply.py | 112 ++--- aslprep/workflows/asl/base.py | 517 +++++++++++----------- aslprep/workflows/asl/cbf.py | 518 +++++++++++----------- aslprep/workflows/asl/confounds.py | 404 ++++++++--------- aslprep/workflows/asl/fit.py | 492 ++++++++++----------- aslprep/workflows/asl/hmc.py | 80 ++-- aslprep/workflows/asl/outputs.py | 625 +++++++++++++-------------- aslprep/workflows/asl/plotting.py | 266 ++++++------ aslprep/workflows/asl/reference.py | 56 +-- aslprep/workflows/asl/resampling.py | 264 +++++------ aslprep/workflows/base.py | 348 +++++++-------- docs/conf.py | 152 +++---- docs/sphinxext/github_link.py | 20 +- pyproject.toml | 4 + 57 files changed, 3973 insertions(+), 3974 deletions(-) diff --git a/.circleci/get_data.py b/.circleci/get_data.py index a7dfa6b16..ca7ca09c3 100644 --- a/.circleci/get_data.py +++ b/.circleci/get_data.py @@ -4,7 +4,7 @@ from aslprep.tests.utils import download_test_data -if __name__ == "__main__": +if __name__ == '__main__': data_dir = sys.argv[1] dset = sys.argv[2] download_test_data(dset, data_dir) diff --git a/.maint/update_authors.py b/.maint/update_authors.py index f312e55e5..613a882a9 100644 --- a/.maint/update_authors.py +++ b/.maint/update_authors.py @@ -36,19 +36,19 @@ def read_md_table(md_text): keys = None retval = [] for line in md_text.splitlines(): - if line.strip().startswith("| --- |"): - keys = (k.replace("*", "").strip() for k in prev.split("|")) + if line.strip().startswith('| --- |'): + keys = (k.replace('*', '').strip() for k in prev.split('|')) keys = [k.lower() for k in keys if k] continue elif not keys: prev = line continue - if not line or not line.strip().startswith("|"): + if not line or not line.strip().startswith('|'): break - values = [v.strip() or None for v in line.split("|")][1:-1] - 
retval.append({k: v for k, v in zip(keys, values) if v}) + values = [v.strip() or None for v in line.split('|')][1:-1] + retval.append({k: v for k, v in zip(keys, values, strict=False) if v}) return retval @@ -56,10 +56,10 @@ def read_md_table(md_text): def sort_contributors(entries, git_lines, exclude=None, last=None): """Return a list of author dictionaries, ordered by contribution.""" last = last or [] - sorted_authors = sorted(entries, key=lambda i: i["name"]) + sorted_authors = sorted(entries, key=lambda i: i['name']) - first_last = [" ".join(val["name"].split(",")[::-1]).strip() for val in sorted_authors] - first_last_excl = [" ".join(val["name"].split(",")[::-1]).strip() for val in exclude or []] + first_last = [' '.join(val['name'].split(',')[::-1]).strip() for val in sorted_authors] + first_last_excl = [' '.join(val['name'].split(',')[::-1]).strip() for val in exclude or []] unmatched = [] author_matches = [] @@ -77,7 +77,7 @@ def sort_contributors(entries, git_lines, exclude=None, last=None): if val not in author_matches: author_matches.append(val) - names = {" ".join(val["name"].split(",")[::-1]).strip() for val in author_matches} + names = {' '.join(val['name'].split(',')[::-1]).strip() for val in author_matches} for missing_name in first_last: if missing_name not in names: missing = sorted_authors[first_last.index(missing_name)] @@ -85,7 +85,7 @@ def sort_contributors(entries, git_lines, exclude=None, last=None): position_matches = [] for i, item in enumerate(author_matches): - pos = item.pop("position", None) + pos = item.pop('position', None) if pos is not None: position_matches.append((i, int(pos))) @@ -97,7 +97,7 @@ def sort_contributors(entries, git_lines, exclude=None, last=None): return author_matches, unmatched -def get_git_lines(fname="line-contributors.txt"): +def get_git_lines(fname='line-contributors.txt'): """Run git-line-summary.""" import shutil import subprocess as sp @@ -106,15 +106,15 @@ def get_git_lines(fname="line-contributors.txt"): lines = [] if contrib_file.exists(): - print("WARNING: Reusing existing line-contributors.txt file.", file=sys.stderr) + print('WARNING: Reusing existing line-contributors.txt file.', file=sys.stderr) lines = contrib_file.read_text().splitlines() - git_line_summary_path = shutil.which("git-line-summary") + git_line_summary_path = shutil.which('git-line-summary') if not lines and git_line_summary_path: - print("Running git-line-summary on repo") + print('Running git-line-summary on repo') lines = sp.check_output([git_line_summary_path]).decode().splitlines() - lines = [l for l in lines if "Not Committed Yet" not in l] - contrib_file.write_text("\n".join(lines)) + lines = [l for l in lines if 'Not Committed Yet' not in l] + contrib_file.write_text('\n'.join(lines)) if not lines: raise RuntimeError( @@ -124,13 +124,13 @@ def get_git_lines(fname="line-contributors.txt"): git-line-summary not found, please install git-extras. 
""" * (git_line_summary_path is None) ) - return [" ".join(line.strip().split()[1:-1]) for line in lines if "%" in line] + return [' '.join(line.strip().split()[1:-1]) for line in lines if '%' in line] def _namelast(inlist): retval = [] for i in inlist: - i["name"] = (f"{i.pop('name', '')} {i.pop('lastname', '')}").strip() + i['name'] = (f"{i.pop('name', '')} {i.pop('lastname', '')}").strip() retval.append(i) return retval @@ -142,13 +142,13 @@ def cli(): @cli.command() -@click.option("-z", "--zenodo-file", type=click.Path(exists=True), default=".zenodo.json") -@click.option("-m", "--maintainers", type=click.Path(exists=True), default=".maint/MAINTAINERS.md") +@click.option('-z', '--zenodo-file', type=click.Path(exists=True), default='.zenodo.json') +@click.option('-m', '--maintainers', type=click.Path(exists=True), default='.maint/MAINTAINERS.md') @click.option( - "-c", "--contributors", type=click.Path(exists=True), default=".maint/CONTRIBUTORS.md" + '-c', '--contributors', type=click.Path(exists=True), default='.maint/CONTRIBUTORS.md' ) -@click.option("--pi", type=click.Path(exists=True), default=".maint/PIs.md") -@click.option("-f", "--former-file", type=click.Path(exists=True), default=".maint/FORMER.md") +@click.option('--pi', type=click.Path(exists=True), default='.maint/PIs.md') +@click.option('-f', '--former-file', type=click.Path(exists=True), default='.maint/FORMER.md') def zenodo( zenodo_file, maintainers, @@ -175,12 +175,12 @@ def zenodo( zen_pi = _namelast( sorted( read_md_table(Path(pi).read_text()), - key=lambda v: (int(v.get("position", -1)), v.get("lastname")), + key=lambda v: (int(v.get('position', -1)), v.get('lastname')), ) ) - zenodo["creators"] = zen_creators - zenodo["contributors"] = zen_contributors + zen_pi + zenodo['creators'] = zen_creators + zenodo['contributors'] = zen_contributors + zen_pi misses = set(miss_creators).intersection(miss_contributors) if misses: @@ -190,30 +190,30 @@ def zenodo( ) # Remove position - for creator in zenodo["creators"]: - creator.pop("position", None) - creator.pop("handle", None) - if isinstance(creator["affiliation"], list): - creator["affiliation"] = creator["affiliation"][0] + for creator in zenodo['creators']: + creator.pop('position', None) + creator.pop('handle', None) + if isinstance(creator['affiliation'], list): + creator['affiliation'] = creator['affiliation'][0] - for creator in zenodo["contributors"]: - creator.pop("handle", None) - creator["type"] = "Researcher" - creator.pop("position", None) + for creator in zenodo['contributors']: + creator.pop('handle', None) + creator['type'] = 'Researcher' + creator.pop('position', None) - if isinstance(creator["affiliation"], list): - creator["affiliation"] = creator["affiliation"][0] + if isinstance(creator['affiliation'], list): + creator['affiliation'] = creator['affiliation'][0] - Path(zenodo_file).write_text("%s\n" % json.dumps(zenodo, indent=2)) + Path(zenodo_file).write_text('%s\n' % json.dumps(zenodo, indent=2)) @cli.command() -@click.option("-m", "--maintainers", type=click.Path(exists=True), default=".maint/MAINTAINERS.md") +@click.option('-m', '--maintainers', type=click.Path(exists=True), default='.maint/MAINTAINERS.md') @click.option( - "-c", "--contributors", type=click.Path(exists=True), default=".maint/CONTRIBUTORS.md" + '-c', '--contributors', type=click.Path(exists=True), default='.maint/CONTRIBUTORS.md' ) -@click.option("--pi", type=click.Path(exists=True), default=".maint/PIs.md") -@click.option("-f", "--former-file", type=click.Path(exists=True), 
default=".maint/FORMER.md") +@click.option('--pi', type=click.Path(exists=True), default='.maint/PIs.md') +@click.option('-f', '--former-file', type=click.Path(exists=True), default='.maint/FORMER.md') def publication( maintainers, contributors, @@ -234,12 +234,12 @@ def publication( pi_hits = _namelast( sorted( read_md_table(Path(pi).read_text()), - key=lambda v: (int(v.get("position", -1)), v.get("lastname")), + key=lambda v: (int(v.get('position', -1)), v.get('lastname')), ) ) - pi_names = [pi["name"] for pi in pi_hits] - hits = [hit for hit in hits if hit["name"] not in pi_names] + pi_hits + pi_names = [pi['name'] for pi in pi_hits] + hits = [hit for hit in hits if hit['name'] not in pi_names] + pi_hits def _aslist(value): if isinstance(value, (list, tuple)): @@ -249,16 +249,16 @@ def _aslist(value): # Remove position affiliations = [] for item in hits: - item.pop("position", None) - for a in _aslist(item.get("affiliation", "Unaffiliated")): + item.pop('position', None) + for a in _aslist(item.get('affiliation', 'Unaffiliated')): if a not in affiliations: affiliations.append(a) aff_indexes = [ - ", ".join( + ', '.join( [ - "%d" % (affiliations.index(a) + 1) - for a in _aslist(author.get("affiliation", "Unaffiliated")) + '%d' % (affiliations.index(a) + 1) + for a in _aslist(author.get('affiliation', 'Unaffiliated')) ] ) for author in hits @@ -270,18 +270,18 @@ def _aslist(value): file=sys.stderr, ) - print("Authors (%d):" % len(hits)) + print('Authors (%d):' % len(hits)) print( - "%s." - % "; ".join(["%s \\ :sup:`%s`\\ " % (i["name"], idx) for i, idx in zip(hits, aff_indexes)]) + '%s.' + % '; '.join(['%s \\ :sup:`%s`\\ ' % (i['name'], idx) for i, idx in zip(hits, aff_indexes, strict=False)]) ) print( - "\n\nAffiliations:\n%s" - % "\n".join(["{0: >2}. {1}".format(i + 1, a) for i, a in enumerate(affiliations)]) + '\n\nAffiliations:\n%s' + % '\n'.join([f'{i + 1: >2}. {a}' for i, a in enumerate(affiliations)]) ) -if __name__ == "__main__": +if __name__ == '__main__': """Install entry-point""" cli() diff --git a/aslprep/__about__.py b/aslprep/__about__.py index ea260b2b7..7b113a7c2 100644 --- a/aslprep/__about__.py +++ b/aslprep/__about__.py @@ -4,14 +4,14 @@ try: from aslprep._version import __version__ except ImportError: - __version__ = "0+unknown" + __version__ = '0+unknown' -__packagename__ = "aslprep" -__copyright__ = "Copyright 2023, The ASLPrep Developers" +__packagename__ = 'aslprep' +__copyright__ = 'Copyright 2023, The ASLPrep Developers' __credits__ = ( - "Contributors: please check the ``.zenodo.json`` file at the top-level folder " - "of the repository." + 'Contributors: please check the ``.zenodo.json`` file at the top-level folder ' + 'of the repository.' 
) -__url__ = "https://github.com/PennLINC/aslprep" +__url__ = 'https://github.com/PennLINC/aslprep' -DOWNLOAD_URL = f"https://github.com/PennLINC/{__packagename__}/archive/{__version__}.tar.gz" +DOWNLOAD_URL = f'https://github.com/PennLINC/{__packagename__}/archive/{__version__}.tar.gz' diff --git a/aslprep/__init__.py b/aslprep/__init__.py index 35c2275f2..c4aeb3bc3 100644 --- a/aslprep/__init__.py +++ b/aslprep/__init__.py @@ -5,8 +5,8 @@ from aslprep.__about__ import __copyright__, __credits__, __packagename__, __version__ __all__ = [ - "__copyright__", - "__credits__", - "__packagename__", - "__version__", + '__copyright__', + '__credits__', + '__packagename__', + '__version__', ] diff --git a/aslprep/__main__.py b/aslprep/__main__.py index f4cb018ef..6bff9a6d6 100644 --- a/aslprep/__main__.py +++ b/aslprep/__main__.py @@ -2,12 +2,12 @@ from aslprep.cli.run import main -if __name__ == "__main__": +if __name__ == '__main__': import sys from aslprep import __name__ as module # `python -m ` typically displays the command as __main__.py - if "__main__.py" in sys.argv[0]: - sys.argv[0] = f"{sys.executable} -m {module}" + if '__main__.py' in sys.argv[0]: + sys.argv[0] = f'{sys.executable} -m {module}' main() diff --git a/aslprep/_warnings.py b/aslprep/_warnings.py index 88bdad5d4..ef4677f04 100644 --- a/aslprep/_warnings.py +++ b/aslprep/_warnings.py @@ -3,7 +3,7 @@ import logging import warnings -_wlog = logging.getLogger("py.warnings") +_wlog = logging.getLogger('py.warnings') _wlog.addHandler(logging.NullHandler()) @@ -11,9 +11,9 @@ def _warn(message, category=None, stacklevel=1, source=None): """Redefine the warning function.""" if category is not None: category = type(category).__name__ - category = category.replace("type", "WARNING") + category = category.replace('type', 'WARNING') - logging.getLogger("py.warnings").warning(f"{category or 'WARNING'}: {message}") + logging.getLogger('py.warnings').warning(f"{category or 'WARNING'}: {message}") def _showwarning(message, category, filename, lineno, file=None, line=None): diff --git a/aslprep/cli/aggregate_qc.py b/aslprep/cli/aggregate_qc.py index 891dca5b0..d48377517 100644 --- a/aslprep/cli/aggregate_qc.py +++ b/aslprep/cli/aggregate_qc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Aggregate QC measures across all subjects in dataset.""" @@ -14,9 +13,9 @@ def get_parser(): parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) - parser.add_argument("aslprep_dir", action="store", type=Path, help="aslprep output dir") + parser.add_argument('aslprep_dir', action='store', type=Path, help='aslprep output dir') - parser.add_argument("output_prefix", action="store", type=str, help="output prefix for group") + parser.add_argument('output_prefix', action='store', type=str, help='output prefix for group') return parser @@ -25,23 +24,23 @@ def main(): opts = get_parser().parse_args() allsubj_dir = os.path.abspath(opts.aslprep_dir) - outputfile = os.getcwd() + "/" + str(opts.output_prefix) + "_allsubjects_qc.tsv" + outputfile = os.getcwd() + '/' + str(opts.output_prefix) + '_allsubjects_qc.tsv' qclist = [] for r, d, f in os.walk(allsubj_dir): for filex in f: - if filex.endswith("desc-qualitycontrol_cbf.tsv"): - qclist.append(r + "/" + filex) + if filex.endswith('desc-qualitycontrol_cbf.tsv'): + qclist.append(r + '/' + filex) datax = pd.read_table(qclist[0]) for i in range(1, 
len(qclist)): dy = pd.read_table(qclist[i]) datax = pd.concat([datax, dy]) - datax.to_csv(outputfile, index=None, sep="\t") + datax.to_csv(outputfile, index=None, sep='\t') -if __name__ == "__main__": +if __name__ == '__main__': raise RuntimeError( - "this should be run after running aslprep;\nit required installation of aslprep" + 'this should be run after running aslprep;\nit required installation of aslprep' ) diff --git a/aslprep/cli/parser.py b/aslprep/cli/parser.py index 86d736f9c..5155619da 100644 --- a/aslprep/cli/parser.py +++ b/aslprep/cli/parser.py @@ -20,14 +20,14 @@ def _build_parser(): def _path_exists(path, parser): """Ensure a given path exists.""" if path is None or not Path(path).exists(): - raise parser.error(f"Path does not exist: <{path}>.") + raise parser.error(f'Path does not exist: <{path}>.') return Path(path).absolute() def _is_file(path, parser): """Ensure a given path exists and it is a file.""" path = _path_exists(path, parser) if not path.is_file(): - raise parser.error(f"Path should point to a file (or symlink of file): <{path}>.") + raise parser.error(f'Path should point to a file (or symlink of file): <{path}>.') return path def _min_one(value, parser): @@ -38,20 +38,20 @@ def _min_one(value, parser): return value def _to_gb(value): - scale = {"G": 1, "T": 10**3, "M": 1e-3, "K": 1e-6, "B": 1e-9} - digits = "".join([c for c in value if c.isdigit()]) - units = value[len(digits) :] or "M" + scale = {'G': 1, 'T': 10**3, 'M': 1e-3, 'K': 1e-6, 'B': 1e-9} + digits = ''.join([c for c in value if c.isdigit()]) + units = value[len(digits) :] or 'M' return int(digits) * scale[units[0]] def _drop_sub(value): - return value[4:] if value.startswith("sub-") else value + return value[4:] if value.startswith('sub-') else value def _process_value(value): import bids if value is None: return bids.layout.Query.NONE - elif value == "*": + elif value == '*': return bids.layout.Query.ANY else: return value @@ -73,16 +73,16 @@ def _bids_filter(value, parser): try: return loads(Path(value).read_text(), object_hook=_filter_pybids_none_any) except JSONDecodeError: - raise parser.error(f"JSON syntax error in: <{value}>.") + raise parser.error(f'JSON syntax error in: <{value}>.') else: - raise parser.error(f"Path does not exist: <{value}>.") + raise parser.error(f'Path does not exist: <{value}>.') - verstr = f"ASLPrep v{config.environment.version}" + verstr = f'ASLPrep v{config.environment.version}' currentv = Version(config.environment.version) is_release = not any((currentv.is_devrelease, currentv.is_prerelease, currentv.is_postrelease)) parser = ArgumentParser( - description=f"ASLPrep: ASL PREProcessing workflows v{config.environment.version}", + description=f'ASLPrep: ASL PREProcessing workflows v{config.environment.version}', formatter_class=ArgumentDefaultsHelpFormatter, ) PathExists = partial(_path_exists, parser=parser) @@ -94,47 +94,47 @@ def _bids_filter(value, parser): # required, positional arguments # IMPORTANT: they must go directly with the parser object parser.add_argument( - "bids_dir", - action="store", + 'bids_dir', + action='store', type=PathExists, help=( - "the root folder of a BIDS valid dataset (sub-XXXXX folders should " - "be found at the top level in this folder)." + 'the root folder of a BIDS valid dataset (sub-XXXXX folders should ' + 'be found at the top level in this folder).' 
), ) parser.add_argument( - "output_dir", - action="store", + 'output_dir', + action='store', type=Path, - help="the output path for the outcomes of preprocessing and visual reports", + help='the output path for the outcomes of preprocessing and visual reports', ) parser.add_argument( - "analysis_level", - choices=["participant"], + 'analysis_level', + choices=['participant'], help=( 'processing stage to be run, only "participant" in the case of ' - "ASLPREP (see BIDS-Apps specification)." + 'ASLPREP (see BIDS-Apps specification).' ), ) # optional arguments - g_bids = parser.add_argument_group("Options for filtering BIDS queries") + g_bids = parser.add_argument_group('Options for filtering BIDS queries') g_bids.add_argument( - "--skip_bids_validation", - "--skip-bids-validation", - action="store_true", + '--skip_bids_validation', + '--skip-bids-validation', + action='store_true', default=False, - help="assume the input dataset is BIDS compliant and skip the validation", + help='assume the input dataset is BIDS compliant and skip the validation', ) g_bids.add_argument( - "--participant-label", - "--participant_label", - action="store", - nargs="+", + '--participant-label', + '--participant_label', + action='store', + nargs='+', type=_drop_sub, help=( - "A space delimited list of participant identifiers or a single identifier " - "(the sub- prefix can be removed)" + 'A space delimited list of participant identifiers or a single identifier ' + '(the sub- prefix can be removed)' ), ) # Re-enable when option is actually implemented @@ -147,11 +147,11 @@ def _bids_filter(value, parser): # "-t", "--task-id", action="store", help="select a specific task to be processed" # ) g_bids.add_argument( - "--bids-filter-file", - dest="bids_filters", - action="store", + '--bids-filter-file', + dest='bids_filters', + action='store', type=BIDSFilter, - metavar="FILE", + metavar='FILE', help=( "A JSON file describing custom BIDS input filters using PyBIDS. " "For further details, please check out " @@ -161,78 +161,78 @@ def _bids_filter(value, parser): ), ) g_bids.add_argument( - "-d", - "--derivatives", - action="store", - metavar="PATH", + '-d', + '--derivatives', + action='store', + metavar='PATH', type=Path, - nargs="*", - help="Search PATH(s) for pre-computed derivatives.", + nargs='*', + help='Search PATH(s) for pre-computed derivatives.', ) g_bids.add_argument( - "--bids-database-dir", - metavar="PATH", + '--bids-database-dir', + metavar='PATH', type=Path, help=( - "Path to a PyBIDS database folder, for faster indexing " - "(especially useful for large datasets). " - "Will be created if not present." + 'Path to a PyBIDS database folder, for faster indexing ' + '(especially useful for large datasets). ' + 'Will be created if not present.' 
), ) - g_perfm = parser.add_argument_group("Options to handle performance") + g_perfm = parser.add_argument_group('Options to handle performance') g_perfm.add_argument( - "--nprocs", - "--nthreads", - "--n_cpus", - "--n-cpus", - dest="nprocs", - action="store", + '--nprocs', + '--nthreads', + '--n_cpus', + '--n-cpus', + dest='nprocs', + action='store', type=PositiveInt, - help="maximum number of threads across all processes", + help='maximum number of threads across all processes', ) g_perfm.add_argument( - "--omp-nthreads", - action="store", + '--omp-nthreads', + action='store', type=PositiveInt, - help="maximum number of threads per-process", + help='maximum number of threads per-process', ) g_perfm.add_argument( - "--mem", - "--mem_mb", - "--mem-mb", - dest="memory_gb", - action="store", + '--mem', + '--mem_mb', + '--mem-mb', + dest='memory_gb', + action='store', type=_to_gb, - help="upper bound memory limit for ASLPrep processes", + help='upper bound memory limit for ASLPrep processes', ) g_perfm.add_argument( - "--low-mem", - action="store_true", - help="attempt to reduce memory usage (will increase disk usage in working directory)", + '--low-mem', + action='store_true', + help='attempt to reduce memory usage (will increase disk usage in working directory)', ) g_perfm.add_argument( - "--use-plugin", - "--nipype-plugin-file", - action="store", - metavar="FILE", + '--use-plugin', + '--nipype-plugin-file', + action='store', + metavar='FILE', type=IsFile, - help="nipype plugin configuration file", + help='nipype plugin configuration file', ) g_perfm.add_argument( - "--sloppy", - action="store_true", + '--sloppy', + action='store_true', default=False, - help="Use low-quality tools for speed - TESTING ONLY", + help='Use low-quality tools for speed - TESTING ONLY', ) - g_subset = parser.add_argument_group("Options for performing only a subset of the workflow") - g_subset.add_argument("--anat-only", action="store_true", help="run anatomical workflows only") + g_subset = parser.add_argument_group('Options for performing only a subset of the workflow') + g_subset.add_argument('--anat-only', action='store_true', help='run anatomical workflows only') g_subset.add_argument( - "--level", - action="store", - default="full", - choices=["minimal", "resampling", "full"], + '--level', + action='store', + default='full', + choices=['minimal', 'resampling', 'full'], help=( "Processing level; may be 'minimal' (nothing that can be recomputed), " "'resampling' (recomputable targets that aid in resampling) " @@ -240,15 +240,15 @@ def _bids_filter(value, parser): ), ) g_subset.add_argument( - "--boilerplate-only", - "--boilerplate_only", - action="store_true", + '--boilerplate-only', + '--boilerplate_only', + action='store_true', default=False, - help="generate boilerplate only", + help='generate boilerplate only', ) g_subset.add_argument( - "--reports-only", - action="store_true", + '--reports-only', + action='store_true', default=False, help=( "only generate reports, don't run workflows. 
This will only rerun report " @@ -256,22 +256,22 @@ def _bids_filter(value, parser): ), ) - g_conf = parser.add_argument_group("Workflow configuration") + g_conf = parser.add_argument_group('Workflow configuration') g_conf.add_argument( - "--ignore", + '--ignore', required=False, - action="store", - nargs="+", + action='store', + nargs='+', default=[], - choices=["fieldmaps", "sbref", "t2w", "flair", "fmap-jacobian"], + choices=['fieldmaps', 'sbref', 't2w', 'flair', 'fmap-jacobian'], help=( - "ignore selected aspects of the input dataset to disable corresponding " - "parts of the workflow (a space delimited list)" + 'ignore selected aspects of the input dataset to disable corresponding ' + 'parts of the workflow (a space delimited list)' ), ) g_conf.add_argument( - "--output-spaces", - nargs="*", + '--output-spaces', + nargs='*', action=OutputReferencesAction, help="""\ Standard and non-standard spaces to resample anatomical and functional images to. \ @@ -285,191 +285,191 @@ def _bids_filter(value, parser): any spatial references.""", ) g_conf.add_argument( - "--longitudinal", - action="store_true", - help="treat dataset as longitudinal - may increase runtime", + '--longitudinal', + action='store_true', + help='treat dataset as longitudinal - may increase runtime', ) g_conf.add_argument( - "--asl2t1w-init", - action="store", - default="register", - choices=["register", "header"], + '--asl2t1w-init', + action='store', + default='register', + choices=['register', 'header'], help=( 'Either "register" (the default) to initialize volumes at center or "header" ' - "to use the header information when coregistering ASL to T1w images." + 'to use the header information when coregistering ASL to T1w images.' ), ) g_conf.add_argument( - "--asl2t1w-dof", - action="store", + '--asl2t1w-dof', + action='store', default=6, choices=[6, 9, 12], type=int, help=( - "Degrees of freedom when registering ASL to T1w images. " - "6 degrees (rotation and translation) are used by default." + 'Degrees of freedom when registering ASL to T1w images. ' + '6 degrees (rotation and translation) are used by default.' ), ) g_use_bbr = g_conf.add_mutually_exclusive_group() g_use_bbr.add_argument( - "--force-bbr", - action="store_true", - dest="use_bbr", + '--force-bbr', + action='store_true', + dest='use_bbr', default=False, - help="Always use boundary-based registration (no goodness-of-fit checks)", + help='Always use boundary-based registration (no goodness-of-fit checks)', ) g_use_bbr.add_argument( - "--force-no-bbr", - action="store_false", - dest="use_bbr", + '--force-no-bbr', + action='store_false', + dest='use_bbr', default=None, - help="Do not use boundary-based registration (no goodness-of-fit checks)", + help='Do not use boundary-based registration (no goodness-of-fit checks)', ) g_conf.add_argument( - "--dummy-scans", + '--dummy-scans', required=False, - action="store", + action='store', default=None, type=int, help=( - "Number of non-steady-state volumes. " - "Note that this indicates the number of volumes, not the number of control-label " - "pairs in the ASL file." + 'Number of non-steady-state volumes. ' + 'Note that this indicates the number of volumes, not the number of control-label ' + 'pairs in the ASL file.' 
), ) g_conf.add_argument( - "--random-seed", - dest="_random_seed", - action="store", + '--random-seed', + dest='_random_seed', + action='store', type=int, default=None, - help="Initialize the random seed for the workflow", + help='Initialize the random seed for the workflow', ) g_use_ge = g_conf.add_mutually_exclusive_group() g_use_ge.add_argument( - "--force-ge", - action="store_true", - dest="use_ge", + '--force-ge', + action='store_true', + dest='use_ge', default=None, - help="Always use boundary-based registration (no goodness-of-fit checks)", + help='Always use boundary-based registration (no goodness-of-fit checks)', ) g_use_ge.add_argument( - "--force-no-ge", - action="store_false", - dest="use_ge", + '--force-no-ge', + action='store_false', + dest='use_ge', default=None, - help="Do not use boundary-based registration (no goodness-of-fit checks)", + help='Do not use boundary-based registration (no goodness-of-fit checks)', ) g_conf.add_argument( - "--m0_scale", + '--m0_scale', required=False, - action="store", + action='store', default=1, type=float, help=( - "Relative scale between ASL and M0. " - "M0 scans are multiplied by m0_scale before calculating CBF. " - "It is important to note, however, that BIDS expects ASL and M0 data to scaled in " - "the raw dataset, so this parameter should only be used if your dataset does not " - "have pre-scaled data." + 'Relative scale between ASL and M0. ' + 'M0 scans are multiplied by m0_scale before calculating CBF. ' + 'It is important to note, however, that BIDS expects ASL and M0 data to scaled in ' + 'the raw dataset, so this parameter should only be used if your dataset does not ' + 'have pre-scaled data.' ), ) g_conf.add_argument( - "--smooth_kernel", - action="store", + '--smooth_kernel', + action='store', default=5, type=float, - help="Smoothing kernel for the M0 image(s)", + help='Smoothing kernel for the M0 image(s)', ) g_conf.add_argument( - "--scorescrub", - action="store_true", + '--scorescrub', + action='store_true', default=False, help="Apply Sudipto Dolui's algorithms for denoising CBF", ) g_conf.add_argument( - "--basil", - action="store_true", + '--basil', + action='store_true', default=False, help="FSL's CBF computation with spatial regularization and partial volume correction", ) - g_outputs = parser.add_argument_group("Options for modulating outputs") + g_outputs = parser.add_argument_group('Options for modulating outputs') g_outputs.add_argument( - "--medial-surface-nan", + '--medial-surface-nan', required=False, - action="store_true", + action='store_true', default=False, help=( - "Replace medial wall values with NaNs on functional GIFTI files. " - "Only performed for GIFTI files mapped to a freesurfer subject " - "(fsaverage or fsnative)." + 'Replace medial wall values with NaNs on functional GIFTI files. ' + 'Only performed for GIFTI files mapped to a freesurfer subject ' + '(fsaverage or fsnative).' ), ) g_conf.add_argument( - "--project-goodvoxels", + '--project-goodvoxels', required=False, - action="store_true", + action='store_true', default=False, help=( - "Exclude voxels whose timeseries have locally high coefficient of variation " - "from surface resampling. " - "Only performed for GIFTI files mapped to a freesurfer subject " - "(fsaverage or fsnative)." + 'Exclude voxels whose timeseries have locally high coefficient of variation ' + 'from surface resampling. ' + 'Only performed for GIFTI files mapped to a freesurfer subject ' + '(fsaverage or fsnative).' 
), ) g_outputs.add_argument( - "--md-only-boilerplate", - action="store_true", + '--md-only-boilerplate', + action='store_true', default=False, - help="Skip generation of HTML and LaTeX formatted citation with pandoc", + help='Skip generation of HTML and LaTeX formatted citation with pandoc', ) g_outputs.add_argument( - "--cifti-output", - nargs="?", - const="91k", + '--cifti-output', + nargs='?', + const='91k', default=False, - choices=("91k", "170k"), + choices=('91k', '170k'), type=str, help=( - "Output preprocessed BOLD as a CIFTI dense timeseries. " - "Optionally, the number of grayordinate can be specified " - "(default is 91k, which equates to 2mm resolution)" + 'Output preprocessed BOLD as a CIFTI dense timeseries. ' + 'Optionally, the number of grayordinate can be specified ' + '(default is 91k, which equates to 2mm resolution)' ), ) g_outputs.add_argument( - "--no-msm", - action="store_false", - dest="run_msmsulc", - help="Disable Multimodal Surface Matching surface registration.", + '--no-msm', + action='store_false', + dest='run_msmsulc', + help='Disable Multimodal Surface Matching surface registration.', ) # ANTs options - g_ants = parser.add_argument_group("Specific options for ANTs registrations") + g_ants = parser.add_argument_group('Specific options for ANTs registrations') g_ants.add_argument( - "--skull-strip-template", - default="OASIS30ANTs", + '--skull-strip-template', + default='OASIS30ANTs', type=Reference.from_string, - help="select a template for skull-stripping with antsBrainExtraction", + help='select a template for skull-stripping with antsBrainExtraction', ) g_ants.add_argument( - "--skull-strip-fixed-seed", - action="store_true", + '--skull-strip-fixed-seed', + action='store_true', help=( - "do not use a random seed for skull-stripping - will ensure " - "run-to-run replicability when used with --omp-nthreads 1 and " - "matching --random-seed " + 'do not use a random seed for skull-stripping - will ensure ' + 'run-to-run replicability when used with --omp-nthreads 1 and ' + 'matching --random-seed ' ), ) g_ants.add_argument( - "--skull-strip-t1w", - action="store", - choices=("auto", "skip", "force"), - default="force", + '--skull-strip-t1w', + action='store', + choices=('auto', 'skip', 'force'), + default='force', help=( "determiner for T1-weighted skull stripping ('force' ensures skull " "stripping, 'skip' ignores skull stripping, and 'auto' applies brain extraction " @@ -478,141 +478,141 @@ def _bids_filter(value, parser): ) # Fieldmap options - g_fmap = parser.add_argument_group("Specific options for handling fieldmaps") + g_fmap = parser.add_argument_group('Specific options for handling fieldmaps') g_fmap.add_argument( - "--fmap-bspline", - action="store_true", + '--fmap-bspline', + action='store_true', default=False, - help="fit a B-Spline field using least-squares (experimental)", + help='fit a B-Spline field using least-squares (experimental)', ) g_fmap.add_argument( - "--fmap-no-demean", - action="store_false", + '--fmap-no-demean', + action='store_false', default=True, - help="do not remove median (within mask) from fieldmap", + help='do not remove median (within mask) from fieldmap', ) # SyN-unwarp options - g_syn = parser.add_argument_group("Specific options for SyN distortion correction") + g_syn = parser.add_argument_group('Specific options for SyN distortion correction') g_syn.add_argument( - "--use-syn-sdc", - action="store_true", + '--use-syn-sdc', + action='store_true', default=False, - help="EXPERIMENTAL: Use fieldmap-free distortion 
correction", + help='EXPERIMENTAL: Use fieldmap-free distortion correction', ) g_syn.add_argument( - "--force-syn", - action="store_true", + '--force-syn', + action='store_true', default=False, help=( - "EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to " - "fieldmap correction, if available" + 'EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to ' + 'fieldmap correction, if available' ), ) # FreeSurfer options - g_fs = parser.add_argument_group("Specific options for FreeSurfer preprocessing") + g_fs = parser.add_argument_group('Specific options for FreeSurfer preprocessing') g_fs.add_argument( - "--fs-license-file", - metavar="FILE", + '--fs-license-file', + metavar='FILE', type=IsFile, help=( - "Path to FreeSurfer license key file. Get it (for free) by registering " - "at https://surfer.nmr.mgh.harvard.edu/registration.html" + 'Path to FreeSurfer license key file. Get it (for free) by registering ' + 'at https://surfer.nmr.mgh.harvard.edu/registration.html' ), ) g_fs.add_argument( - "--fs-subjects-dir", - metavar="PATH", + '--fs-subjects-dir', + metavar='PATH', type=Path, help=( - "Path to existing FreeSurfer subjects directory to reuse. " - "(default: OUTPUT_DIR/freesurfer)" + 'Path to existing FreeSurfer subjects directory to reuse. ' + '(default: OUTPUT_DIR/freesurfer)' ), ) g_fs.add_argument( - "--no-submm-recon", - action="store_false", - dest="hires", - help="Disable sub-millimeter (hires) reconstruction", + '--no-submm-recon', + action='store_false', + dest='hires', + help='Disable sub-millimeter (hires) reconstruction', ) g_fs.add_argument( - "--fs-no-reconall", - action="store_false", - dest="run_reconall", - help="Disable FreeSurfer surface preprocessing.", + '--fs-no-reconall', + action='store_false', + dest='run_reconall', + help='Disable FreeSurfer surface preprocessing.', ) - g_other = parser.add_argument_group("Other options") - g_other.add_argument("--version", action="version", version=verstr) + g_other = parser.add_argument_group('Other options') + g_other.add_argument('--version', action='version', version=verstr) g_other.add_argument( - "-v", - "--verbose", - dest="verbose_count", - action="count", + '-v', + '--verbose', + dest='verbose_count', + action='count', default=0, - help="increases log verbosity for each occurrence, debug level is -vvv", + help='increases log verbosity for each occurrence, debug level is -vvv', ) g_other.add_argument( - "-w", - "--work-dir", - action="store", + '-w', + '--work-dir', + action='store', type=Path, - default=Path("work").absolute(), - help="path where intermediate results should be stored", + default=Path('work').absolute(), + help='path where intermediate results should be stored', ) g_other.add_argument( - "--clean-workdir", - action="store_true", + '--clean-workdir', + action='store_true', default=False, help=( - "Clears working directory of contents. Use of this flag is not" - "recommended when running concurrent processes of aslprep." + 'Clears working directory of contents. Use of this flag is not' + 'recommended when running concurrent processes of aslprep.' ), ) g_other.add_argument( - "--resource-monitor", - action="store_true", + '--resource-monitor', + action='store_true', default=False, help="enable Nipype's resource monitoring to keep track of memory and CPU usage", ) g_other.add_argument( - "--config-file", - action="store", - metavar="FILE", + '--config-file', + action='store', + metavar='FILE', help=( - "Use pre-generated configuration file. 
Values in file will be overridden " - "by command-line arguments." + 'Use pre-generated configuration file. Values in file will be overridden ' + 'by command-line arguments.' ), ) g_other.add_argument( - "--write-graph", - action="store_true", + '--write-graph', + action='store_true', default=False, - help="Write workflow graph.", + help='Write workflow graph.', ) g_other.add_argument( - "--stop-on-first-crash", - action="store_true", + '--stop-on-first-crash', + action='store_true', default=False, - help="Force stopping on first crash, even if a work directory was specified.", + help='Force stopping on first crash, even if a work directory was specified.', ) g_other.add_argument( - "--notrack", - action="store_true", + '--notrack', + action='store_true', default=False, help=( - "Opt-out of sending tracking information of this run to " - "the aslprep developers. This information helps to " - "improve aslprep and provides an indicator of real " - "world usage crucial for obtaining funding." + 'Opt-out of sending tracking information of this run to ' + 'the aslprep developers. This information helps to ' + 'improve aslprep and provides an indicator of real ' + 'world usage crucial for obtaining funding.' ), ) g_other.add_argument( - "--debug", - action="store", - nargs="+", - choices=config.DEBUG_MODES + ("all",), + '--debug', + action='store', + nargs='+', + choices=config.DEBUG_MODES + ('all',), help="Debug mode(s) to enable. 'all' is alias for all available modes.", ) @@ -628,7 +628,7 @@ def _bids_filter(value, parser): _blist = is_flagged() if _blist[0]: - _reason = _blist[1] or "unknown" + _reason = _blist[1] or 'unknown' print( f"""\ WARNING: Version {config.environment.version} of aslprep (current) has been FLAGGED @@ -650,17 +650,17 @@ def parse_args(args=None, namespace=None): parser = _build_parser() opts = parser.parse_args(args, namespace) if opts.config_file: - skip = {} if opts.reports_only else {"execution": ("run_uuid",)} + skip = {} if opts.reports_only else {'execution': ('run_uuid',)} config.load(opts.config_file, skip=skip, init=False) - config.loggers.cli.info(f"Loaded previous configuration file {opts.config_file}") + config.loggers.cli.info(f'Loaded previous configuration file {opts.config_file}') config.execution.log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG)) - config.from_dict(vars(opts), init=["nipype"]) + config.from_dict(vars(opts), init=['nipype']) # Initialize --output-spaces if not defined if config.execution.output_spaces is None: config.execution.output_spaces = SpatialReferences( - [Reference("MNI152NLin2009cAsym", {"res": "native"})] + [Reference('MNI152NLin2009cAsym', {'res': 'native'})] ) # Retrieve logging level @@ -672,12 +672,12 @@ def parse_args(args=None, namespace=None): with open(opts.use_plugin) as f: plugin_settings = yaml.load(f, Loader=yaml.FullLoader) - _plugin = plugin_settings.get("plugin") + _plugin = plugin_settings.get('plugin') if _plugin: config.nipype.plugin = _plugin - config.nipype.plugin_args = plugin_settings.get("plugin_args", {}) + config.nipype.plugin_args = plugin_settings.get('plugin_args', {}) config.nipype.nprocs = opts.nprocs or config.nipype.plugin_args.get( - "n_procs", config.nipype.nprocs + 'n_procs', config.nipype.nprocs ) # Resource management options @@ -685,12 +685,12 @@ def parse_args(args=None, namespace=None): # This may need to be revisited if people try to use batch plugins if 1 < config.nipype.nprocs < config.nipype.omp_nthreads: build_log.warning( - f"Per-process threads 
(--omp-nthreads={config.nipype.omp_nthreads}) exceed " - f"total threads (--nthreads/--n_cpus={config.nipype.nprocs})" + f'Per-process threads (--omp-nthreads={config.nipype.omp_nthreads}) exceed ' + f'total threads (--nthreads/--n_cpus={config.nipype.nprocs})' ) # Inform the user about the risk of using brain-extracted images - if config.workflow.skull_strip_t1w == "auto": + if config.workflow.skull_strip_t1w == 'auto': build_log.warning( """\ Option ``--skull-strip-t1w`` was set to 'auto'. A heuristic will be \ @@ -710,7 +710,7 @@ def parse_args(args=None, namespace=None): version = config.environment.version if config.execution.fs_subjects_dir is None: - config.execution.fs_subjects_dir = output_dir / "sourcedata" / "freesurfer" + config.execution.fs_subjects_dir = output_dir / 'sourcedata' / 'freesurfer' if config.execution.aslprep_dir is None: config.execution.aslprep_dir = output_dir @@ -719,9 +719,9 @@ def parse_args(args=None, namespace=None): if opts.clean_workdir and work_dir.exists(): from niworkflows.utils.misc import clean_directory - build_log.info(f"Clearing previous aslprep working directory: {work_dir}") + build_log.info(f'Clearing previous aslprep working directory: {work_dir}') if not clean_directory(work_dir): - build_log.warning(f"Could not clear all contents of working directory: {work_dir}") + build_log.warning(f'Could not clear all contents of working directory: {work_dir}') # Update the config with an empty dict to trigger initialization of all config # sections (we used `init=False` above). @@ -731,16 +731,16 @@ def parse_args(args=None, namespace=None): # Ensure input and output folders are not the same if output_dir == bids_dir: - rec_path = bids_dir / "derivatives" / f"aslprep-{version.split('+')[0]}" + rec_path = bids_dir / 'derivatives' / f"aslprep-{version.split('+')[0]}" parser.error( - "The selected output folder is the same as the input BIDS folder. " - f"Please modify the output path (suggestion: {rec_path})." + 'The selected output folder is the same as the input BIDS folder. ' + f'Please modify the output path (suggestion: {rec_path}).' ) if bids_dir in work_dir.parents: parser.error( - "The selected working directory is a subdirectory of the input BIDS folder. " - "Please modify the output path." + 'The selected working directory is a subdirectory of the input BIDS folder. ' + 'Please modify the output path.' ) # Validate inputs @@ -748,12 +748,12 @@ def parse_args(args=None, namespace=None): from aslprep.utils.bids import validate_input_dir build_log.info( - "Making sure the input data is BIDS compliant (warnings can be ignored in most cases)." + 'Making sure the input data is BIDS compliant (warnings can be ignored in most cases).' 
) validate_input_dir(config.environment.exec_env, opts.bids_dir, opts.participant_label) # Setup directories - config.execution.log_dir = config.execution.aslprep_dir / "logs" + config.execution.log_dir = config.execution.aslprep_dir / 'logs' # Check and create output and working directories config.execution.log_dir.mkdir(exist_ok=True, parents=True) output_dir.mkdir(exist_ok=True, parents=True) diff --git a/aslprep/cli/run.py b/aslprep/cli/run.py index a9d4013c3..6ab88bfcf 100644 --- a/aslprep/cli/run.py +++ b/aslprep/cli/run.py @@ -39,11 +39,11 @@ def main(): parse_args() - if "pdb" in config.execution.debug: + if 'pdb' in config.execution.debug: from aslprep.utils.debug import setup_exceptionhook setup_exceptionhook() - config.nipype.plugin = "Linear" + config.nipype.plugin = 'Linear' sentry_sdk = None if not config.execution.notrack and not config.execution.debug: @@ -56,14 +56,14 @@ def main(): # CRITICAL Save the config to a file. This is necessary because the execution graph # is built as a separate process to keep the memory footprint low. The most # straightforward way to communicate with the child process is via the filesystem. - config_file = config.execution.work_dir / config.execution.run_uuid / "config.toml" + config_file = config.execution.work_dir / config.execution.run_uuid / 'config.toml' config_file.parent.mkdir(exist_ok=True, parents=True) config.to_filename(config_file) # CRITICAL Call build_workflow(config_file, retval) in a subprocess. # Because Python on Linux does not ever free virtual memory (VM), running the # workflow construction jailed within a process preempts excessive VM buildup. - if "pdb" not in config.execution.debug: + if 'pdb' not in config.execution.debug: with Manager() as mgr: retval = mgr.dict() p = Process(target=build_workflow, args=(str(config_file), retval)) @@ -72,13 +72,13 @@ def main(): retval = dict(retval.items()) # Convert to base dictionary if p.exitcode: - retval["return_code"] = p.exitcode + retval['return_code'] = p.exitcode else: retval = build_workflow(str(config_file), {}) - exitcode = retval.get("return_code", 0) - aslprep_wf = retval.get("workflow", None) + exitcode = retval.get('return_code', 0) + aslprep_wf = retval.get('workflow', None) # CRITICAL Load the config from the file. 
This is necessary because the ``build_workflow`` # function executed constrained in a process may change the config (and thus the global @@ -89,7 +89,7 @@ def main(): sys.exit(int(exitcode > 0)) if aslprep_wf and config.execution.write_graph: - aslprep_wf.write_graph(graph2use="colored", format="svg", simple_form=True) + aslprep_wf.write_graph(graph2use='colored', format='svg', simple_form=True) exitcode = exitcode or (aslprep_wf is None) * EX_SOFTWARE if exitcode != 0: @@ -112,16 +112,16 @@ def main(): # Sentry tracking if sentry_sdk is not None: with sentry_sdk.configure_scope() as scope: - scope.set_tag("run_uuid", config.execution.run_uuid) - scope.set_tag("npart", len(config.execution.participant_label)) - sentry_sdk.add_breadcrumb(message="ASLPrep started", level="info") - sentry_sdk.capture_message("ASLPrep started", level="info") + scope.set_tag('run_uuid', config.execution.run_uuid) + scope.set_tag('npart', len(config.execution.participant_label)) + sentry_sdk.add_breadcrumb(message='ASLPrep started', level='info') + sentry_sdk.capture_message('ASLPrep started', level='info') config.loggers.workflow.log( 15, - "\n".join(["ASLPrep config:"] + [f"\t\t{s}" for s in config.dumps().splitlines()]), + '\n'.join(['ASLPrep config:'] + [f'\t\t{s}' for s in config.dumps().splitlines()]), ) - config.loggers.workflow.log(25, "ASLPrep started!") + config.loggers.workflow.log(25, 'ASLPrep started!') errno = 1 # Default is error exit unless otherwise set try: aslprep_wf.run(**config.nipype.get_plugin()) @@ -130,48 +130,48 @@ def main(): from aslprep.utils.sentry import process_crashfile crashfolders = [ - config.execution.aslprep_dir / f"sub-{s}" / "log" / config.execution.run_uuid + config.execution.aslprep_dir / f'sub-{s}' / 'log' / config.execution.run_uuid for s in config.execution.participant_label ] for crashfolder in crashfolders: - for crashfile in crashfolder.glob("crash*.*"): + for crashfile in crashfolder.glob('crash*.*'): process_crashfile(crashfile) - if sentry_sdk is not None and "Workflow did not execute cleanly" not in str(e): + if sentry_sdk is not None and 'Workflow did not execute cleanly' not in str(e): sentry_sdk.capture_exception(e) - config.loggers.workflow.critical("ASLPrep failed: %s", e) + config.loggers.workflow.critical('ASLPrep failed: %s', e) raise else: - config.loggers.workflow.log(25, "ASLPrep finished successfully!") + config.loggers.workflow.log(25, 'ASLPrep finished successfully!') if sentry_sdk is not None: - success_message = "ASLPrep finished without errors" - sentry_sdk.add_breadcrumb(message=success_message, level="info") - sentry_sdk.capture_message(success_message, level="info") + success_message = 'ASLPrep finished without errors' + sentry_sdk.add_breadcrumb(message=success_message, level='info') + sentry_sdk.capture_message(success_message, level='info') # Bother users with the boilerplate only iff the workflow went okay. 
- boiler_file = config.execution.aslprep_dir / "logs" / "CITATION.md" + boiler_file = config.execution.aslprep_dir / 'logs' / 'CITATION.md' if boiler_file.exists(): if config.environment.exec_env in ( - "singularity", - "docker", - "aslprep-docker", + 'singularity', + 'docker', + 'aslprep-docker', ): - boiler_file = Path("") / boiler_file.relative_to( + boiler_file = Path('') / boiler_file.relative_to( config.execution.output_dir ) config.loggers.workflow.log( 25, - "Works derived from this ASLPrep execution should include the " - f"boilerplate text found in {boiler_file}.", + 'Works derived from this ASLPrep execution should include the ' + f'boilerplate text found in {boiler_file}.', ) if config.workflow.run_reconall: from niworkflows.utils.misc import _copy_any from templateflow import api - dseg_tsv = str(api.get("fsaverage", suffix="dseg", extension=[".tsv"])) - _copy_any(dseg_tsv, str(config.execution.aslprep_dir / "desc-aseg_dseg.tsv")) - _copy_any(dseg_tsv, str(config.execution.aslprep_dir / "desc-aparcaseg_dseg.tsv")) + dseg_tsv = str(api.get('fsaverage', suffix='dseg', extension=['.tsv'])) + _copy_any(dseg_tsv, str(config.execution.aslprep_dir / 'desc-aseg_dseg.tsv')) + _copy_any(dseg_tsv, str(config.execution.aslprep_dir / 'desc-aparcaseg_dseg.tsv')) errno = 0 finally: from fmriprep.reports.core import generate_reports @@ -183,22 +183,22 @@ def main(): config.execution.participant_label, config.execution.aslprep_dir, config.execution.run_uuid, - config=data.load("reports-spec.yml"), - packagename="aslprep", + config=data.load('reports-spec.yml'), + packagename='aslprep', ) write_derivative_description(config.execution.bids_dir, config.execution.aslprep_dir) write_bidsignore(config.execution.aslprep_dir) if sentry_sdk is not None and failed_reports: sentry_sdk.capture_message( - f"Report generation failed for {failed_reports} subjects", - level="error", + f'Report generation failed for {failed_reports} subjects', + level='error', ) sys.exit(int((errno + failed_reports) > 0)) -if __name__ == "__main__": +if __name__ == '__main__': raise RuntimeError( - "aslprep/cli/run.py should not be run directly;\n" - "Please `pip install` aslprep and use the `aslprep` command" + 'aslprep/cli/run.py should not be run directly;\n' + 'Please `pip install` aslprep and use the `aslprep` command' ) diff --git a/aslprep/cli/version.py b/aslprep/cli/version.py index 7795438cc..ab8d80e33 100644 --- a/aslprep/cli/version.py +++ b/aslprep/cli/version.py @@ -10,7 +10,7 @@ from aslprep import __version__ RELEASE_EXPIRY_DAYS = 14 -DATE_FMT = "%Y%m%d" +DATE_FMT = '%Y%m%d' def check_latest(): @@ -20,7 +20,7 @@ def check_latest(): latest = None date = None outdated = None - cachefile = Path.home() / ".cache" / "aslprep" / "latest" + cachefile = Path.home() / '.cache' / 'aslprep' / 'latest' try: cachefile.parent.mkdir(parents=True, exist_ok=True) except OSError: @@ -28,7 +28,7 @@ def check_latest(): if cachefile and cachefile.exists(): try: - latest, date = cachefile.read_text().split("|") + latest, date = cachefile.read_text().split('|') except Exception: pass else: @@ -43,12 +43,12 @@ def check_latest(): if latest is None or outdated is True: try: - response = requests.get(url="https://pypi.org/pypi/alprep/json", timeout=1.0) + response = requests.get(url='https://pypi.org/pypi/alprep/json', timeout=1.0) except Exception: response = None if response and response.status_code == 200: - versions = [Version(rel) for rel in response.json()["releases"].keys()] + versions = [Version(rel) for rel in 
response.json()['releases'].keys()] versions = [rel for rel in versions if not rel.is_prerelease] if versions: latest = sorted(versions)[-1] @@ -57,7 +57,7 @@ def check_latest(): if cachefile is not None and latest is not None: try: - cachefile.write_text("|".join((latest, datetime.now().strftime(DATE_FMT)))) + cachefile.write_text('|'.join((latest, datetime.now().strftime(DATE_FMT)))) except Exception: pass @@ -78,7 +78,7 @@ def is_flagged(): response = None if response and response.status_code == 200: - flagged = response.json().get("flagged", {}) or {} + flagged = response.json().get('flagged', {}) or {} if __version__ in flagged: return True, flagged[__version__] diff --git a/aslprep/cli/workflow.py b/aslprep/cli/workflow.py index 873902782..509bc2415 100644 --- a/aslprep/cli/workflow.py +++ b/aslprep/cli/workflow.py @@ -26,30 +26,30 @@ def build_workflow(config_file, retval): version = config.environment.version - retval["return_code"] = 1 - retval["workflow"] = None + retval['return_code'] = 1 + retval['workflow'] = None - banner = [f"Running ASLPrep version {version}"] - notice_path = data.load.readable("NOTICE") + banner = [f'Running ASLPrep version {version}'] + notice_path = data.load.readable('NOTICE') if notice_path.exists(): - banner[0] += "\n" + banner[0] += '\n' banner += [f"License NOTICE {'#' * 50}"] - banner += [f"ASLPrep {version}"] + banner += [f'ASLPrep {version}'] banner += notice_path.read_text().splitlines(keepends=False)[1:] - banner += ["#" * len(banner[1])] + banner += ['#' * len(banner[1])] build_log.log(25, f"\n{' ' * 9}".join(banner)) # warn if older results exist: check for dataset_description.json in output folder msg = check_pipeline_version( - "ASLPrep", + 'ASLPrep', version, - config.execution.aslprep_dir / "dataset_description.json", + config.execution.aslprep_dir / 'dataset_description.json', ) if msg is not None: build_log.warning(msg) # Please note this is the input folder's dataset_description.json - dset_desc_path = config.execution.bids_dir / "dataset_description.json" + dset_desc_path = config.execution.bids_dir / 'dataset_description.json' if dset_desc_path.exists(): from hashlib import sha256 @@ -65,34 +65,34 @@ def build_workflow(config_file, retval): if config.execution.reports_only: from aslprep.data import load as load_data - build_log.log(25, "Running --reports-only on participants %s", ", ".join(subject_list)) - retval["return_code"] = generate_reports( + build_log.log(25, 'Running --reports-only on participants %s', ', '.join(subject_list)) + retval['return_code'] = generate_reports( subject_list, config.execution.aslprep_dir, config.execution.run_uuid, - config=load_data("reports-spec.yml"), - packagename="aslprep", + config=load_data('reports-spec.yml'), + packagename='aslprep', ) return retval # Build main workflow init_msg = [ "Building ASLPrep's workflow:", - f"BIDS dataset path: {config.execution.bids_dir}.", - f"Participant list: {subject_list}.", - f"Run identifier: {config.execution.run_uuid}.", - f"Output spaces: {config.execution.output_spaces}.", + f'BIDS dataset path: {config.execution.bids_dir}.', + f'Participant list: {subject_list}.', + f'Run identifier: {config.execution.run_uuid}.', + f'Output spaces: {config.execution.output_spaces}.', ] if config.execution.derivatives: - init_msg += [f"Searching for derivatives: {config.execution.derivatives}."] + init_msg += [f'Searching for derivatives: {config.execution.derivatives}.'] if config.execution.fs_subjects_dir: init_msg += [f"Pre-run FreeSurfer's SUBJECTS_DIR: 
{config.execution.fs_subjects_dir}."] build_log.log(25, f"\n{' ' * 11}* ".join(init_msg)) - retval["workflow"] = init_aslprep_wf() + retval['workflow'] = init_aslprep_wf() # Check for FS license after building the workflow if not check_valid_fs_license(): @@ -103,25 +103,25 @@ def build_workflow(config_file, retval): 2) ``$FS_LICENSE`` environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. Get it \ (for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html""" ) - retval["return_code"] = 126 # 126 == Command invoked cannot execute. + retval['return_code'] = 126 # 126 == Command invoked cannot execute. return retval # Check workflow for missing commands - missing = check_deps(retval["workflow"]) + missing = check_deps(retval['workflow']) if missing: build_log.critical( - "Cannot run ASLPrep. Missing dependencies:%s", - "\n\t* ".join([""] + [f"{cmd} (Interface: {iface})" for iface, cmd in missing]), + 'Cannot run ASLPrep. Missing dependencies:%s', + '\n\t* '.join([''] + [f'{cmd} (Interface: {iface})' for iface, cmd in missing]), ) - retval["return_code"] = 127 # 127 == command not found. + retval['return_code'] = 127 # 127 == command not found. return retval config.to_filename(config_file) build_log.info( - "ASLPrep workflow graph with %d nodes built successfully.", - len(retval["workflow"]._get_all_nodes()), + 'ASLPrep workflow graph with %d nodes built successfully.', + len(retval['workflow']._get_all_nodes()), ) - retval["return_code"] = 0 + retval['return_code'] = 0 return retval @@ -130,9 +130,9 @@ def build_boilerplate(config_file, workflow): from aslprep import config config.load(config_file) - logs_path = config.execution.aslprep_dir / "logs" + logs_path = config.execution.aslprep_dir / 'logs' boilerplate = workflow.visit_desc() - citation_files = {ext: logs_path / f"CITATION.{ext}" for ext in ("bib", "tex", "md", "html")} + citation_files = {ext: logs_path / f'CITATION.{ext}' for ext in ('bib', 'tex', 'md', 'html')} if boilerplate: # To please git-annex users and also to guarantee consistency @@ -144,9 +144,9 @@ def build_boilerplate(config_file, workflow): except FileNotFoundError: pass - citation_files["md"].write_text(boilerplate) + citation_files['md'].write_text(boilerplate) - if not config.execution.md_only_boilerplate and citation_files["md"].exists(): + if not config.execution.md_only_boilerplate and citation_files['md'].exists(): from shutil import copyfile from subprocess import CalledProcessError, TimeoutExpired, check_call @@ -154,39 +154,39 @@ def build_boilerplate(config_file, workflow): # Generate HTML file resolving citations cmd = [ - "pandoc", - "-s", - "--bibliography", - str(load_data("boilerplate.bib")), - "--citeproc", - "--metadata", + 'pandoc', + '-s', + '--bibliography', + str(load_data('boilerplate.bib')), + '--citeproc', + '--metadata', 'pagetitle="ASLPrep citation boilerplate"', - str(citation_files["md"]), - "-o", - str(citation_files["html"]), + str(citation_files['md']), + '-o', + str(citation_files['html']), ] - config.loggers.cli.info("Generating an HTML version of the citation boilerplate...") + config.loggers.cli.info('Generating an HTML version of the citation boilerplate...') try: check_call(cmd, timeout=10) except (FileNotFoundError, CalledProcessError, TimeoutExpired): - config.loggers.cli.warning("Could not generate CITATION.html file:\n%s", " ".join(cmd)) + config.loggers.cli.warning('Could not generate CITATION.html file:\n%s', ' '.join(cmd)) # Generate LaTex file resolving citations cmd = [ - "pandoc", 
- "-s", - "--bibliography", - str(load_data("boilerplate.bib")), - "--natbib", - str(citation_files["md"]), - "-o", - str(citation_files["tex"]), + 'pandoc', + '-s', + '--bibliography', + str(load_data('boilerplate.bib')), + '--natbib', + str(citation_files['md']), + '-o', + str(citation_files['tex']), ] - config.loggers.cli.info("Generating a LaTeX version of the citation boilerplate...") + config.loggers.cli.info('Generating a LaTeX version of the citation boilerplate...') try: check_call(cmd, timeout=10) except (FileNotFoundError, CalledProcessError, TimeoutExpired): - config.loggers.cli.warning("Could not generate CITATION.tex file:\n%s", " ".join(cmd)) + config.loggers.cli.warning('Could not generate CITATION.tex file:\n%s', ' '.join(cmd)) else: - copyfile(load_data("boilerplate.bib"), citation_files["bib"]) + copyfile(load_data('boilerplate.bib'), citation_files['bib']) diff --git a/aslprep/config.py b/aslprep/config.py index 7170c7c69..6e9b6ac3e 100644 --- a/aslprep/config.py +++ b/aslprep/config.py @@ -91,14 +91,14 @@ from multiprocessing import set_start_method # Disable NiPype etelemetry always -_disable_et = bool(os.getenv("NO_ET") is not None or os.getenv("NIPYPE_NO_ET") is not None) -os.environ["NIPYPE_NO_ET"] = "1" -os.environ["NO_ET"] = "1" +_disable_et = bool(os.getenv('NO_ET') is not None or os.getenv('NIPYPE_NO_ET') is not None) +os.environ['NIPYPE_NO_ET'] = '1' +os.environ['NO_ET'] = '1' -CONFIG_FILENAME = "aslprep.toml" +CONFIG_FILENAME = 'aslprep.toml' try: - set_start_method("forkserver") + set_start_method('forkserver') except RuntimeError: pass # context has been already set finally: @@ -115,28 +115,28 @@ from aslprep import __version__ -if not hasattr(sys, "_is_pytest_session"): +if not hasattr(sys, '_is_pytest_session'): sys._is_pytest_session = False # Trick to avoid sklearn's FutureWarnings # Disable all warnings in main and children processes only on production versions if not any( ( - "+" in __version__, - __version__.endswith(".dirty"), - os.getenv("ASLPREP_DEV", "0").lower() in ("1", "on", "true", "y", "yes"), + '+' in __version__, + __version__.endswith('.dirty'), + os.getenv('ASLPREP_DEV', '0').lower() in ('1', 'on', 'true', 'y', 'yes'), ) ): from aslprep._warnings import logging - os.environ["PYTHONWARNINGS"] = "ignore" -elif os.getenv("ASLPREP_WARNINGS", "0").lower() in ("1", "on", "true", "y", "yes"): + os.environ['PYTHONWARNINGS'] = 'ignore' +elif os.getenv('ASLPREP_WARNINGS', '0').lower() in ('1', 'on', 'true', 'y', 'yes'): # allow disabling warnings on development versions # https://github.com/nipreps/fmriprep/pull/2080#discussion_r409118765 from aslprep._warnings import logging else: import logging -logging.addLevelName(25, "IMPORTANT") # Add a new level between INFO and WARNING -logging.addLevelName(15, "VERBOSE") # Add a new level between INFO and DEBUG +logging.addLevelName(25, 'IMPORTANT') # Add a new level between INFO and WARNING +logging.addLevelName(15, 'VERBOSE') # Add a new level between INFO and DEBUG DEFAULT_MEMORY_MIN_GB = 0.01 @@ -150,29 +150,29 @@ from requests import get as _get_url with suppress((ConnectionError, ReadTimeout)): - _get_url("https://rig.mit.edu/et/projects/nipy/nipype", timeout=0.05) + _get_url('https://rig.mit.edu/et/projects/nipy/nipype', timeout=0.05) # Execution environment _exec_env = os.name _docker_ver = None # special variable set in the container -if os.getenv("IS_DOCKER_8395080871"): - _exec_env = "singularity" - _cgroup = Path("/proc/1/cgroup") - if _cgroup.exists() and "docker" in 
_cgroup.read_text(): - _docker_ver = os.getenv("DOCKER_VERSION_8395080871") - _exec_env = "aslprep-docker" if _docker_ver else "docker" +if os.getenv('IS_DOCKER_8395080871'): + _exec_env = 'singularity' + _cgroup = Path('/proc/1/cgroup') + if _cgroup.exists() and 'docker' in _cgroup.read_text(): + _docker_ver = os.getenv('DOCKER_VERSION_8395080871') + _exec_env = 'aslprep-docker' if _docker_ver else 'docker' del _cgroup -_fs_license = os.getenv("FS_LICENSE") -if not _fs_license and os.getenv("FREESURFER_HOME"): - _fs_home = os.getenv("FREESURFER_HOME") - if _fs_home and (Path(_fs_home) / "license.txt").is_file(): - _fs_license = str(Path(_fs_home) / "license.txt") +_fs_license = os.getenv('FS_LICENSE') +if not _fs_license and os.getenv('FREESURFER_HOME'): + _fs_home = os.getenv('FREESURFER_HOME') + if _fs_home and (Path(_fs_home) / 'license.txt').is_file(): + _fs_license = str(Path(_fs_home) / 'license.txt') del _fs_home _templateflow_home = Path( - os.getenv("TEMPLATEFLOW_HOME", os.path.join(os.getenv("HOME"), ".cache", "templateflow")) + os.getenv('TEMPLATEFLOW_HOME', os.path.join(os.getenv('HOME'), '.cache', 'templateflow')) ) try: @@ -182,20 +182,20 @@ except Exception: _free_mem_at_start = None -_oc_limit = "n/a" -_oc_policy = "n/a" +_oc_limit = 'n/a' +_oc_policy = 'n/a' try: # Memory policy may have a large effect on types of errors experienced - _proc_oc_path = Path("/proc/sys/vm/overcommit_memory") + _proc_oc_path = Path('/proc/sys/vm/overcommit_memory') if _proc_oc_path.exists(): - _oc_policy = {"0": "heuristic", "1": "always", "2": "never"}.get( - _proc_oc_path.read_text().strip(), "unknown" + _oc_policy = {'0': 'heuristic', '1': 'always', '2': 'never'}.get( + _proc_oc_path.read_text().strip(), 'unknown' ) - if _oc_policy != "never": - _proc_oc_kbytes = Path("/proc/sys/vm/overcommit_kbytes") + if _oc_policy != 'never': + _proc_oc_kbytes = Path('/proc/sys/vm/overcommit_kbytes') if _proc_oc_kbytes.exists(): _oc_limit = _proc_oc_kbytes.read_text().strip() - if _oc_limit in ("0", "n/a") and Path("/proc/sys/vm/overcommit_ratio").exists(): + if _oc_limit in ('0', 'n/a') and Path('/proc/sys/vm/overcommit_ratio').exists(): _oc_limit = f"{Path('/proc/sys/vm/overcommit_ratio').read_text().strip()}%" except Exception: pass @@ -203,7 +203,7 @@ # Debug modes are names that influence the exposure of internal details to # the user, either through additional derivatives or increased verbosity -DEBUG_MODES = ("pdb",) +DEBUG_MODES = ('pdb',) class _Config: @@ -213,7 +213,7 @@ class _Config: def __init__(self): """Avert instantiation.""" - raise RuntimeError("Configuration type is not instantiable.") + raise RuntimeError('Configuration type is not instantiable.') @classmethod def load(cls, settings, init=True, ignore=None): @@ -243,7 +243,7 @@ def get(cls): out = {} for k, v in cls.__dict__.items(): - if k.startswith("_") or v is None: + if k.startswith('_') or v is None: continue if callable(getattr(cls, k)): continue @@ -253,7 +253,7 @@ def get(cls): else: v = str(v) if isinstance(v, SpatialReferences): - v = " ".join(str(s) for s in v.references) or None + v = ' '.join(str(s) for s in v.references) or None if isinstance(v, Reference): v = str(v) or None out[k] = v @@ -297,7 +297,7 @@ class environment(_Config): class nipype(_Config): """Nipype settings.""" - crashfile_format = "txt" + crashfile_format = 'txt' """The file format for crashfiles, either text or pickle.""" get_linked_libs = False """Run NiPype's tool to enlist linked libraries for every interface.""" @@ -307,11 +307,11 @@ class 
nipype(_Config): """Number of processes (compute tasks) that can be run in parallel (multiprocessing only).""" omp_nthreads = None """Number of CPUs a single process can access for multithreaded execution.""" - plugin = "MultiProc" + plugin = 'MultiProc' """NiPype's execution plugin.""" plugin_args = { - "maxtasksperchild": 1, - "raise_insufficient": False, + 'maxtasksperchild': 1, + 'raise_insufficient': False, } """Settings for NiPype's execution plugin.""" resource_monitor = False @@ -323,13 +323,13 @@ class nipype(_Config): def get_plugin(cls): """Format a dictionary for Nipype consumption.""" out = { - "plugin": cls.plugin, - "plugin_args": cls.plugin_args, + 'plugin': cls.plugin, + 'plugin_args': cls.plugin_args, } - if cls.plugin in ("MultiProc", "LegacyMultiProc"): - out["plugin_args"]["n_procs"] = int(cls.nprocs) + if cls.plugin in ('MultiProc', 'LegacyMultiProc'): + out['plugin_args']['n_procs'] = int(cls.nprocs) if cls.memory_gb: - out["plugin_args"]["memory_gb"] = float(cls.memory_gb) + out['plugin_args']['memory_gb'] = float(cls.memory_gb) return out @classmethod @@ -341,10 +341,10 @@ def init(cls): if cls.resource_monitor: ncfg.update_config( { - "monitoring": { - "enabled": cls.resource_monitor, - "sample_frequency": "0.5", - "summary_append": True, + 'monitoring': { + 'enabled': cls.resource_monitor, + 'sample_frequency': '0.5', + 'summary_append': True, } } ) @@ -353,12 +353,12 @@ def init(cls): # Nipype config (logs and execution) ncfg.update_config( { - "execution": { - "crashdump_dir": str(execution.log_dir), - "crashfile_format": cls.crashfile_format, - "get_linked_libs": cls.get_linked_libs, - "stop_on_first_crash": cls.stop_on_first_crash, - "check_version": False, # disable future telemetry + 'execution': { + 'crashdump_dir': str(execution.log_dir), + 'crashfile_format': cls.crashfile_format, + 'get_linked_libs': cls.get_linked_libs, + 'stop_on_first_crash': cls.stop_on_first_crash, + 'check_version': False, # disable future telemetry } } ) @@ -419,7 +419,7 @@ class execution(_Config): """Select a particular task from all available in the dataset.""" templateflow_home = _templateflow_home """The root folder of the TemplateFlow client.""" - work_dir = Path("work").absolute() + work_dir = Path('work').absolute() """Path to a working directory where intermediate results will be available.""" write_graph = False """Write out the computational graph corresponding to the planned preprocessing.""" @@ -427,24 +427,24 @@ class execution(_Config): _layout = None _paths = ( - "bids_dir", - "derivatives", - "bids_database_dir", - "aslprep_dir", - "fs_license_file", - "fs_subjects_dir", - "layout", - "log_dir", - "output_dir", - "templateflow_home", - "work_dir", + 'bids_dir', + 'derivatives', + 'bids_database_dir', + 'aslprep_dir', + 'fs_license_file', + 'fs_subjects_dir', + 'layout', + 'log_dir', + 'output_dir', + 'templateflow_home', + 'work_dir', ) @classmethod def init(cls): """Create a new BIDS Layout accessible with :attr:`~execution.layout`.""" if cls.fs_license_file and Path(cls.fs_license_file).is_file(): - os.environ["FS_LICENSE"] = str(cls.fs_license_file) + os.environ['FS_LICENSE'] = str(cls.fs_license_file) if cls._layout is None: import re @@ -452,20 +452,20 @@ def init(cls): from bids.layout import BIDSLayout from bids.layout.index import BIDSLayoutIndexer - _db_path = cls.bids_database_dir or (cls.work_dir / cls.run_uuid / "bids_db") + _db_path = cls.bids_database_dir or (cls.work_dir / cls.run_uuid / 'bids_db') _db_path.mkdir(exist_ok=True, parents=True) # 
Recommended after PyBIDS 12.1 _indexer = BIDSLayoutIndexer( validate=False, ignore=( - "code", - "stimuli", - "sourcedata", - "models", - re.compile(r"\/\.\w+|^\.\w+"), + 'code', + 'stimuli', + 'sourcedata', + 'models', + re.compile(r'\/\.\w+|^\.\w+'), re.compile( - r"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/(beh|dwi|eeg|ieeg|meg|func)" + r'sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/(beh|dwi|eeg|ieeg|meg|func)' ), ), ) @@ -487,7 +487,7 @@ def _process_value(value): else: return ( getattr(Query, value[7:-4]) - if not isinstance(value, Query) and "Query" in value + if not isinstance(value, Query) and 'Query' in value else value ) @@ -496,7 +496,7 @@ def _process_value(value): for k, v in filters.items(): cls.bids_filters[acq][k] = _process_value(v) - if "all" in cls.debug: + if 'all' in cls.debug: cls.debug = list(DEBUG_MODES) @@ -518,7 +518,7 @@ class workflow(_Config): """Execute the anatomical preprocessing only.""" asl2t1w_dof = None """Degrees of freedom of the ASL-to-T1w registration steps.""" - asl2t1w_init = "register" + asl2t1w_init = 'register' """Whether to use standard coregistration ('register') or to initialize coregistration from the ASL image-header ('header').""" m0_scale = float(1) @@ -537,7 +537,7 @@ class workflow(_Config): """Run FreeSurfer ``recon-all`` with the ``-hires`` flag.""" ignore = None """Ignore particular steps for *ASLPrep*.""" - level = "full" + level = 'full' """Level of preprocessing to complete. One of ['minimal', 'resampling', 'full'].""" longitudinal = False """Run FreeSurfer ``recon-all`` with the ``--longitudinal`` flag.""" @@ -551,9 +551,9 @@ class workflow(_Config): """Run FreeSurfer's surface reconstruction.""" skull_strip_fixed_seed = False """Fix a seed for skull-stripping.""" - skull_strip_template = "OASIS30ANTs" + skull_strip_template = 'OASIS30ANTs' """Change default brain extraction template.""" - skull_strip_t1w = "force" + skull_strip_t1w = 'force' """Skip brain extraction of the T1w image (default is ``force``, meaning that *ASLPrep* will run brain extraction of the T1w).""" spaces = None @@ -585,18 +585,18 @@ def init(cls): class loggers: """Keep loggers easily accessible (see :py:func:`init`).""" - _fmt = "%(asctime)s,%(msecs)d %(name)-2s %(levelname)-2s:\n\t %(message)s" - _datefmt = "%y%m%d-%H:%M:%S" + _fmt = '%(asctime)s,%(msecs)d %(name)-2s %(levelname)-2s:\n\t %(message)s' + _datefmt = '%y%m%d-%H:%M:%S' default = logging.getLogger() """The root logger.""" - cli = logging.getLogger("cli") + cli = logging.getLogger('cli') """Command-line interface logging.""" - workflow = logging.getLogger("nipype.workflow") + workflow = logging.getLogger('nipype.workflow') """NiPype's workflow logger.""" - interface = logging.getLogger("nipype.interface") + interface = logging.getLogger('nipype.interface') """NiPype's interface logger.""" - utils = logging.getLogger("nipype.utils") + utils = logging.getLogger('nipype.utils') """NiPype's utils logger.""" @classmethod @@ -621,7 +621,7 @@ def init(cls): cls.workflow.setLevel(execution.log_level) cls.utils.setLevel(execution.log_level) ncfg.update_config( - {"logging": {"log_directory": str(execution.log_dir), "log_to_file": True}} + {'logging': {'log_directory': str(execution.log_dir), 'log_to_file': True}} ) @@ -652,7 +652,7 @@ def init(cls): def _set_ants_seed(): """Fix random seed for antsRegistration, antsAI, antsMotionCorr.""" val = random.randint(1, 65536) - os.environ["ANTS_RANDOM_SEED"] = str(val) + os.environ['ANTS_RANDOM_SEED'] = str(val) return val @@ -682,10 +682,10 @@ def from_dict(settings, 
init=True, ignore=None): def initialize(x): return init if init in (True, False) else x in init - nipype.load(settings, init=initialize("nipype"), ignore=ignore) - execution.load(settings, init=initialize("execution"), ignore=ignore) - workflow.load(settings, init=initialize("workflow"), ignore=ignore) - seeds.load(settings, init=initialize("seeds"), ignore=ignore) + nipype.load(settings, init=initialize('nipype'), ignore=ignore) + execution.load(settings, init=initialize('execution'), ignore=ignore) + workflow.load(settings, init=initialize('workflow'), ignore=ignore) + seeds.load(settings, init=initialize('seeds'), ignore=ignore) loggers.init() @@ -713,7 +713,7 @@ def initialize(x): filename = Path(filename) settings = loads(filename.read_text()) for sectionname, configs in settings.items(): - if sectionname != "environment": + if sectionname != 'environment': section = getattr(sys.modules[__name__], sectionname) ignore = skip.get(sectionname) section.load(configs, ignore=ignore, init=initialize(sectionname)) @@ -723,17 +723,17 @@ def initialize(x): def get(flat=False): """Get config as a dict.""" settings = { - "environment": environment.get(), - "execution": execution.get(), - "workflow": workflow.get(), - "nipype": nipype.get(), - "seeds": seeds.get(), + 'environment': environment.get(), + 'execution': execution.get(), + 'workflow': workflow.get(), + 'nipype': nipype.get(), + 'seeds': seeds.get(), } if not flat: return settings return { - ".".join((section, k)): v + '.'.join((section, k)): v for section, configs in settings.items() for k, v in configs.items() } @@ -759,15 +759,15 @@ def init_spaces(checkpoint=True): spaces = execution.output_spaces or SpatialReferences() if not isinstance(spaces, SpatialReferences): spaces = SpatialReferences( - [ref for s in spaces.split(" ") for ref in Reference.from_string(s)] + [ref for s in spaces.split(' ') for ref in Reference.from_string(s)] ) if checkpoint and not spaces.is_cached(): spaces.checkpoint() # Add the default standard space if not already present (required by several sub-workflows) - if "MNI152NLin2009cAsym" not in spaces.get_spaces(nonstandard=False, dim=(3,)): - spaces.add(Reference("MNI152NLin2009cAsym", {})) + if 'MNI152NLin2009cAsym' not in spaces.get_spaces(nonstandard=False, dim=(3,)): + spaces.add(Reference('MNI152NLin2009cAsym', {})) # Ensure user-defined spatial references for outputs are correctly parsed. # Certain options require normalization to a space not explicitly defined by users. @@ -775,8 +775,8 @@ def init_spaces(checkpoint=True): cifti_output = workflow.cifti_output if cifti_output: # CIFTI grayordinates to corresponding FSL-MNI resolutions. - vol_res = "2" if cifti_output == "91k" else "1" - spaces.add(Reference("MNI152NLin6Asym", {"res": vol_res})) + vol_res = '2' if cifti_output == '91k' else '1' + spaces.add(Reference('MNI152NLin6Asym', {'res': vol_res})) # Make the SpatialReferences object available workflow.spaces = spaces diff --git a/aslprep/data/__init__.py b/aslprep/data/__init__.py index 4cfb23b9b..baeb81891 100644 --- a/aslprep/data/__init__.py +++ b/aslprep/data/__init__.py @@ -36,7 +36,7 @@ except ImportError: from importlib_resources.abc import Traversable -__all__ = ["load"] +__all__ = ['load'] class Loader: @@ -111,7 +111,7 @@ class Loader: .. automethod:: cached """ - def __init__(self, anchor: Union[str, ModuleType]): + def __init__(self, anchor: str | ModuleType): self._anchor = anchor self.files = files(anchor) self.exit_stack = ExitStack() @@ -128,19 +128,19 @@ def _doc(self): directory. 
""" top_level = sorted( - os.path.relpath(p, self.files) + "/"[: p.is_dir()] + os.path.relpath(p, self.files) + '/'[: p.is_dir()] for p in self.files.iterdir() - if p.name[0] not in (".", "_") and p.name != "tests" + if p.name[0] not in ('.', '_') and p.name != 'tests' ) doclines = [ - f"Load package files relative to ``{self._anchor}``.", - "", - "This package contains the following (top-level) files/directories:", - "", - *(f"* ``{path}``" for path in top_level), + f'Load package files relative to ``{self._anchor}``.', + '', + 'This package contains the following (top-level) files/directories:', + '', + *(f'* ``{path}``' for path in top_level), ] - return "\n".join(doclines) + return '\n'.join(doclines) def readable(self, *segments) -> Traversable: """Provide read access to a resource through a Path-like interface. diff --git a/aslprep/interfaces/__init__.py b/aslprep/interfaces/__init__.py index 6c36cf309..a17bca907 100644 --- a/aslprep/interfaces/__init__.py +++ b/aslprep/interfaces/__init__.py @@ -14,13 +14,13 @@ ) __all__ = [ - "ants", - "bids", - "cbf", - "confounds", - "parcellation", - "plotting", - "reference", - "reports", - "utility", + 'ants', + 'bids', + 'cbf', + 'confounds', + 'parcellation', + 'plotting', + 'reference', + 'reports', + 'utility', ] diff --git a/aslprep/interfaces/ants.py b/aslprep/interfaces/ants.py index c4db97550..9c266967d 100644 --- a/aslprep/interfaces/ants.py +++ b/aslprep/interfaces/ants.py @@ -11,17 +11,17 @@ class _ApplyTransformsInputSpec(_FixTraitApplyTransformsInputSpec): # Nipype's version doesn't have GenericLabel interpolation = traits.Enum( - "Linear", - "NearestNeighbor", - "CosineWindowedSinc", - "WelchWindowedSinc", - "HammingWindowedSinc", - "LanczosWindowedSinc", - "MultiLabel", - "Gaussian", - "BSpline", - "GenericLabel", - argstr="%s", + 'Linear', + 'NearestNeighbor', + 'CosineWindowedSinc', + 'WelchWindowedSinc', + 'HammingWindowedSinc', + 'LanczosWindowedSinc', + 'MultiLabel', + 'Gaussian', + 'BSpline', + 'GenericLabel', + argstr='%s', usedefault=True, ) @@ -42,7 +42,7 @@ def _run_interface(self, runtime): # Run normally self.inputs.output_image = fname_presuffix( self.inputs.input_image, - suffix="_trans.nii.gz", + suffix='_trans.nii.gz', newpath=runtime.cwd, use_ext=False, ) diff --git a/aslprep/interfaces/bids.py b/aslprep/interfaces/bids.py index e17c1f81b..6b16b43a0 100644 --- a/aslprep/interfaces/bids.py +++ b/aslprep/interfaces/bids.py @@ -17,16 +17,16 @@ from aslprep.data import load as load_data # NOTE: Modified for aslprep's purposes -aslprep_spec = loads(load_data.readable("aslprep_bids_config.json").read_text()) -bids_config = Config.load("bids") -deriv_config = Config.load("derivatives") +aslprep_spec = loads(load_data.readable('aslprep_bids_config.json').read_text()) +bids_config = Config.load('bids') +deriv_config = Config.load('derivatives') -aslprep_entities = {v["name"]: v["pattern"] for v in aslprep_spec["entities"]} +aslprep_entities = {v['name']: v['pattern'] for v in aslprep_spec['entities']} merged_entities = {**bids_config.entities, **deriv_config.entities} merged_entities = {k: v.pattern for k, v in merged_entities.items()} merged_entities = {**merged_entities, **aslprep_entities} -merged_entities = [{"name": k, "pattern": v} for k, v in merged_entities.items()] -config_entities = frozenset({e["name"] for e in merged_entities}) +merged_entities = [{'name': k, 'pattern': v} for k, v in merged_entities.items()] +config_entities = frozenset({e['name'] for e in merged_entities}) class 
_BIDSDataGrabberInputSpec(BaseInterfaceInputSpec): @@ -35,14 +35,14 @@ class _BIDSDataGrabberInputSpec(BaseInterfaceInputSpec): class _BIDSDataGrabberOutputSpec(TraitedSpec): - out_dict = traits.Dict(desc="output data structure") - fmap = OutputMultiObject(desc="output fieldmaps") - asl = OutputMultiObject(desc="output ASL images") - sbref = OutputMultiObject(desc="output sbrefs") - t1w = OutputMultiObject(desc="output T1w images") - roi = OutputMultiObject(desc="output ROI images") - t2w = OutputMultiObject(desc="output T2w images") - flair = OutputMultiObject(desc="output FLAIR images") + out_dict = traits.Dict(desc='output data structure') + fmap = OutputMultiObject(desc='output fieldmaps') + asl = OutputMultiObject(desc='output ASL images') + sbref = OutputMultiObject(desc='output sbrefs') + t1w = OutputMultiObject(desc='output T1w images') + roi = OutputMultiObject(desc='output ROI images') + t2w = OutputMultiObject(desc='output T2w images') + flair = OutputMultiObject(desc='output FLAIR images') class BIDSDataGrabber(SimpleInterface): @@ -53,7 +53,7 @@ class BIDSDataGrabber(SimpleInterface): _require_funcs = True def __init__(self, *args, **kwargs): - anat_only = kwargs.pop("anat_only") + anat_only = kwargs.pop('anat_only') super(BIDSDataGrabber, self).__init__(*args, **kwargs) if anat_only is not None: self._require_funcs = not anat_only @@ -61,20 +61,20 @@ def __init__(self, *args, **kwargs): def _run_interface(self, runtime): bids_dict = self.inputs.subject_data - self._results["out_dict"] = bids_dict + self._results['out_dict'] = bids_dict self._results.update(bids_dict) - if not bids_dict["t1w"]: + if not bids_dict['t1w']: raise FileNotFoundError( - f"No T1w images found for subject sub-{self.inputs.subject_id}" + f'No T1w images found for subject sub-{self.inputs.subject_id}' ) - if self._require_funcs and not bids_dict["asl"]: + if self._require_funcs and not bids_dict['asl']: raise FileNotFoundError( - f"No ASL images found for subject sub-{self.inputs.subject_id}" + f'No ASL images found for subject sub-{self.inputs.subject_id}' ) - for imtype in ["t2w", "flair", "fmap", "sbref", "roi", "asl"]: + for imtype in ['t2w', 'flair', 'fmap', 'sbref', 'roi', 'asl']: if not bids_dict[imtype]: config.loggers.interface.info( 'No "%s" images found for sub-%s', @@ -91,11 +91,11 @@ class DerivativesDataSink(BaseDerivativesDataSink): A child class of the niworkflows DerivativesDataSink, using aslprep's configuration files. 
""" - out_path_base = "" + out_path_base = '' _allowed_entities = set(config_entities) _config_entities = config_entities _config_entities_dict = merged_entities - _file_patterns = aslprep_spec["default_path_patterns"] + _file_patterns = aslprep_spec['default_path_patterns'] class OverrideDerivativesDataSink: diff --git a/aslprep/interfaces/cbf.py b/aslprep/interfaces/cbf.py index 071b81ceb..57a6c1f83 100644 --- a/aslprep/interfaces/cbf.py +++ b/aslprep/interfaces/cbf.py @@ -34,13 +34,13 @@ class _RefineMaskInputSpec(BaseInterfaceInputSpec): - t1w_mask = File(exists=True, mandatory=True, desc="t1 mask") - asl_mask = File(exists=True, mandatory=True, desc="asl mask") - m0_mask = File(exists=True, mandatory=False, desc="M0 mask (if available)") + t1w_mask = File(exists=True, mandatory=True, desc='t1 mask') + asl_mask = File(exists=True, mandatory=True, desc='asl mask') + m0_mask = File(exists=True, mandatory=False, desc='M0 mask (if available)') class _RefineMaskOutputSpec(TraitedSpec): - out_mask = File(exists=False, desc="output mask") + out_mask = File(exists=False, desc='output mask') class RefineMask(SimpleInterface): @@ -52,9 +52,9 @@ class RefineMask(SimpleInterface): def _run_interface(self, runtime): from nilearn import image - self._results["out_mask"] = fname_presuffix( + self._results['out_mask'] = fname_presuffix( self.inputs.asl_mask, - suffix="_refinemask", + suffix='_refinemask', newpath=runtime.cwd, ) @@ -66,28 +66,28 @@ def _run_interface(self, runtime): img3 = nb.load(self.inputs.m0_mask) img3 = nb.funcs.squeeze_image(img3) out_mask = image.math_img( - "img1 * img2 * img3", + 'img1 * img2 * img3', img1=img1, img2=img2, img3=img3, ) else: out_mask = image.math_img( - "img1 * img2", + 'img1 * img2', img1=img1, img2=img2, ) - out_mask.to_filename(self._results["out_mask"]) + out_mask.to_filename(self._results['out_mask']) return runtime class _ExtractCBFInputSpec(BaseInterfaceInputSpec): - name_source = File(exists=True, mandatory=True, desc="raw asl file") - asl_file = File(exists=True, mandatory=True, desc="preprocessed asl file") - metadata = traits.Dict(mandatory=True, desc="metadata for ASL file") - aslcontext = File(exists=True, mandatory=True, desc="aslcontext TSV file for run.") + name_source = File(exists=True, mandatory=True, desc='raw asl file') + asl_file = File(exists=True, mandatory=True, desc='preprocessed asl file') + metadata = traits.Dict(mandatory=True, desc='metadata for ASL file') + aslcontext = File(exists=True, mandatory=True, desc='aslcontext TSV file for run.') m0scan = traits.Either( File(exists=True), None, @@ -100,30 +100,30 @@ class _ExtractCBFInputSpec(BaseInterfaceInputSpec): mandatory=True, desc="metadata for M0 scan. 
Only defined if M0Type is 'Separate'.", ) - in_mask = File(exists=True, mandatory=True, desc="mask") + in_mask = File(exists=True, mandatory=True, desc='mask') dummy_scans = traits.Int( default_value=0, usedefault=True, mandatory=False, - desc="remove first n volumes", + desc='remove first n volumes', ) - fwhm = traits.Float(default_value=5, usedefault=True, mandatory=False, desc="fwhm") + fwhm = traits.Float(default_value=5, usedefault=True, mandatory=False, desc='fwhm') class _ExtractCBFOutputSpec(TraitedSpec): - out_file = File(exists=False, desc="Either CBF or deltaM time series.") - m0_file = File(exists=False, desc="Mean M0 image, after smoothing.") + out_file = File(exists=False, desc='Either CBF or deltaM time series.') + m0_file = File(exists=False, desc='Mean M0 image, after smoothing.') metadata = traits.Dict( desc=( - "Metadata for the ASL run. " - "The dictionary may be modified to only include metadata associated with the selected " - "volumes." + 'Metadata for the ASL run. ' + 'The dictionary may be modified to only include metadata associated with the selected ' + 'volumes.' ), ) m0tr = traits.Either( traits.Float, None, - desc="RepetitionTimePreparation for M0 scans.", + desc='RepetitionTimePreparation for M0 scans.', ) @@ -148,20 +148,20 @@ def _run_interface(self, runtime): if aslcontext.shape[0] != asl_img.shape[3]: raise ValueError( - f"Number of rows in aslcontext ({aslcontext.shape[0]}) != " - f"number of volumes in ASL file ({asl_img.shape[3]})" + f'Number of rows in aslcontext ({aslcontext.shape[0]}) != ' + f'number of volumes in ASL file ({asl_img.shape[3]})' ) # get the control, tag, moscan or label - vol_types = aslcontext["volume_type"].tolist() - control_volume_idx = [i for i, vol_type in enumerate(vol_types) if vol_type == "control"] - label_volume_idx = [i for i, vol_type in enumerate(vol_types) if vol_type == "label"] - m0_volume_idx = [i for i, vol_type in enumerate(vol_types) if vol_type == "m0scan"] - deltam_volume_idx = [i for i, vol_type in enumerate(vol_types) if vol_type == "deltam"] - cbf_volume_idx = [i for i, vol_type in enumerate(vol_types) if vol_type == "cbf"] + vol_types = aslcontext['volume_type'].tolist() + control_volume_idx = [i for i, vol_type in enumerate(vol_types) if vol_type == 'control'] + label_volume_idx = [i for i, vol_type in enumerate(vol_types) if vol_type == 'label'] + m0_volume_idx = [i for i, vol_type in enumerate(vol_types) if vol_type == 'm0scan'] + deltam_volume_idx = [i for i, vol_type in enumerate(vol_types) if vol_type == 'deltam'] + cbf_volume_idx = [i for i, vol_type in enumerate(vol_types) if vol_type == 'cbf'] # extract m0 file and register it to ASL if separate - if metadata["M0Type"] == "Separate": + if metadata['M0Type'] == 'Separate': m0file = self.inputs.m0scan m0data_smooth = smooth_image(nb.load(m0file), fwhm=self.inputs.fwhm).get_fdata() if len(m0data_smooth.shape) > 3: @@ -169,35 +169,35 @@ def _run_interface(self, runtime): else: m0data = mask_data * m0data_smooth - m0tr = self.inputs.m0scan_metadata["RepetitionTimePreparation"] + m0tr = self.inputs.m0scan_metadata['RepetitionTimePreparation'] if np.array(m0tr).size > 1 and np.std(m0tr) > 0: - raise ValueError("M0 scans have variable TR. ASLPrep does not support this.") + raise ValueError('M0 scans have variable TR. 
ASLPrep does not support this.') - elif metadata["M0Type"] == "Included": + elif metadata['M0Type'] == 'Included': m0data = asl_data[:, :, :, m0_volume_idx] m0img = nb.Nifti1Image(m0data, asl_img.affine, asl_img.header) m0data_smooth = smooth_image(m0img, fwhm=self.inputs.fwhm).get_fdata() m0data = mask_data * np.mean(m0data_smooth, axis=3) - if np.array(metadata["RepetitionTimePreparation"]).size > 1: - m0tr = np.array(metadata["RepetitionTimePreparation"])[m0_volume_idx] + if np.array(metadata['RepetitionTimePreparation']).size > 1: + m0tr = np.array(metadata['RepetitionTimePreparation'])[m0_volume_idx] else: - m0tr = metadata["RepetitionTimePreparation"] + m0tr = metadata['RepetitionTimePreparation'] if np.array(m0tr).size > 1 and np.std(m0tr) > 0: - raise ValueError("M0 scans have variable TR. ASLPrep does not support this.") + raise ValueError('M0 scans have variable TR. ASLPrep does not support this.') - elif metadata["M0Type"] == "Estimate": - m0data = metadata["M0Estimate"] * mask_data + elif metadata['M0Type'] == 'Estimate': + m0data = metadata['M0Estimate'] * mask_data m0tr = None - elif metadata["M0Type"] == "Absent": + elif metadata['M0Type'] == 'Absent': if control_volume_idx and not cbf_volume_idx: # BackgroundSuppression is required, so no need to use get(). - if metadata["BackgroundSuppression"]: + if metadata['BackgroundSuppression']: raise ValueError( - "Background-suppressed control volumes cannot be used for calibration." + 'Background-suppressed control volumes cannot be used for calibration.' ) if control_volume_idx: @@ -208,10 +208,10 @@ def _run_interface(self, runtime): m0data = mask_data * np.mean(control_img, axis=3) # Use the control volumes' TR as the M0 TR. - if np.array(metadata["RepetitionTimePreparation"]).size > 1: - m0tr = np.array(metadata["RepetitionTimePreparation"])[control_volume_idx[0]] + if np.array(metadata['RepetitionTimePreparation']).size > 1: + m0tr = np.array(metadata['RepetitionTimePreparation'])[control_volume_idx[0]] else: - m0tr = metadata["RepetitionTimePreparation"] + m0tr = metadata['RepetitionTimePreparation'] elif cbf_volume_idx: # If we have precalculated CBF data, we don't need M0, so we'll just use the mask. @@ -221,21 +221,21 @@ def _run_interface(self, runtime): else: raise RuntimeError( - "m0scan is absent, " - "and there are no control volumes that can be used as a substitute" + 'm0scan is absent, ' + 'and there are no control volumes that can be used as a substitute' ) else: - raise RuntimeError("no pathway to m0scan") + raise RuntimeError('no pathway to m0scan') if deltam_volume_idx: - config.loggers.interface.info("Extracting deltaM from ASL file.") + config.loggers.interface.info('Extracting deltaM from ASL file.') metadata_idx = deltam_volume_idx out_data = asl_data[:, :, :, deltam_volume_idx] elif label_volume_idx: config.loggers.interface.info( - "Calculating deltaM from label-control pairs in ASL file." + 'Calculating deltaM from label-control pairs in ASL file.' 
) assert len(label_volume_idx) == len(control_volume_idx) metadata_idx = control_volume_idx @@ -248,16 +248,16 @@ def _run_interface(self, runtime): out_data = asl_data[:, :, :, cbf_volume_idx] else: - raise RuntimeError("No valid ASL or CBF image.") + raise RuntimeError('No valid ASL or CBF image.') # Remove volume-wise metadata for M0 scans as necessary VOLUME_WISE_FIELDS = [ - "PostLabelingDelay", - "VascularCrushingVENC", - "LabelingDuration", - "EchoTime", - "FlipAngle", - "RepetitionTimePreparation", + 'PostLabelingDelay', + 'VascularCrushingVENC', + 'LabelingDuration', + 'EchoTime', + 'FlipAngle', + 'RepetitionTimePreparation', ] for field in VOLUME_WISE_FIELDS: @@ -267,8 +267,8 @@ def _run_interface(self, runtime): value = metadata[field] if isinstance(value, list) and len(value) != asl_data.shape[3]: raise ValueError( - f"{field} is an array, but the number of values ({len(value)}) " - f"does not match the number of volumes in the ASL data ({asl_data.shape[3]})." + f'{field} is an array, but the number of values ({len(value)}) ' + f'does not match the number of volumes in the ASL data ({asl_data.shape[3]}).' ) elif isinstance(value, list): # Reduce to only the selected volumes @@ -280,23 +280,23 @@ def _run_interface(self, runtime): metadata[field] = value - self._results["metadata"] = metadata - self._results["m0tr"] = m0tr - self._results["out_file"] = fname_presuffix( + self._results['metadata'] = metadata + self._results['m0tr'] = m0tr + self._results['out_file'] = fname_presuffix( self.inputs.name_source, - suffix="_DeltaMOrCBF", + suffix='_DeltaMOrCBF', newpath=runtime.cwd, ) - self._results["m0_file"] = fname_presuffix( + self._results['m0_file'] = fname_presuffix( self.inputs.name_source, - suffix="_m0file", + suffix='_m0file', newpath=runtime.cwd, ) nb.Nifti1Image(out_data, asl_img.affine, asl_img.header).to_filename( - self._results["out_file"] + self._results['out_file'] ) nb.Nifti1Image(m0data, asl_img.affine, asl_img.header).to_filename( - self._results["m0_file"] + self._results['m0_file'] ) return runtime @@ -307,10 +307,10 @@ class _ComputeCBFInputSpec(BaseInterfaceInputSpec): exists=True, mandatory=True, desc=( - "NIfTI file containing raw CBF volume(s). " - "These raw CBF values are the result of subtracting label volumes from " - "control volumes, without any kind of additional scaling. " - "This file may be 3D or 4D." + 'NIfTI file containing raw CBF volume(s). ' + 'These raw CBF values are the result of subtracting label volumes from ' + 'control volumes, without any kind of additional scaling. ' + 'This file may be 3D or 4D.' ), ) metadata = traits.Dict( @@ -321,13 +321,13 @@ class _ComputeCBFInputSpec(BaseInterfaceInputSpec): m0_scale = traits.Float( exists=True, mandatory=True, - desc="Relative scale between ASL and M0.", + desc='Relative scale between ASL and M0.', ) - m0_file = File(exists=True, mandatory=True, desc="M0 nifti file") - mask = File(exists=True, mandatory=True, desc="Mask nifti file") + m0_file = File(exists=True, mandatory=True, desc='M0 nifti file') + mask = File(exists=True, mandatory=True, desc='Mask nifti file') cbf_only = traits.Bool( mandatory=True, - desc="Whether data are deltam (False) or CBF (True).", + desc='Whether data are deltam (False) or CBF (True).', ) @@ -335,18 +335,18 @@ class _ComputeCBFOutputSpec(TraitedSpec): cbf_ts = traits.Either( File(exists=True), None, - desc="Quantitative CBF time series, in mL/100g/min. Only generated for single-delay data.", + desc='Quantitative CBF time series, in mL/100g/min. 
Only generated for single-delay data.', ) - mean_cbf = File(exists=True, desc="Quantified CBF, averaged over time.") + mean_cbf = File(exists=True, desc='Quantified CBF, averaged over time.') att = traits.Either( File(exists=True), None, - desc="Arterial transit time map, in seconds. Only generated for multi-delay data.", + desc='Arterial transit time map, in seconds. Only generated for multi-delay data.', ) plds = traits.Either( File(exists=True), None, - desc="Post-labeling delays. Only defined if slice-timing correction is applied.", + desc='Post-labeling delays. Only defined if slice-timing correction is applied.', ) @@ -395,24 +395,24 @@ def _run_interface(self, runtime): deltam_file = self.inputs.deltam # control - label signal intensities if self.inputs.cbf_only: - config.loggers.interface.debug("CBF data detected. Skipping CBF estimation.") - self._results["cbf_ts"] = fname_presuffix( + config.loggers.interface.debug('CBF data detected. Skipping CBF estimation.') + self._results['cbf_ts'] = fname_presuffix( deltam_file, - suffix="_cbf_ts", + suffix='_cbf_ts', newpath=runtime.cwd, ) cbf_img = nb.load(deltam_file) - cbf_img.to_filename(self._results["cbf_ts"]) - self._results["mean_cbf"] = fname_presuffix( + cbf_img.to_filename(self._results['cbf_ts']) + self._results['mean_cbf'] = fname_presuffix( deltam_file, - suffix="_meancbf", + suffix='_meancbf', newpath=runtime.cwd, ) mean_cbf_img = image.mean_img(cbf_img) - mean_cbf_img.to_filename(self._results["mean_cbf"]) + mean_cbf_img.to_filename(self._results['mean_cbf']) # No ATT available for pre-calculated CBF - self._results["att"] = None + self._results['att'] = None return runtime @@ -423,7 +423,7 @@ def _run_interface(self, runtime): # PostLabelingDelay is either a single number or an array of numbers. # If it is an array of numbers, then there should be one value for every volume in the # time series, with any M0 volumes having a value of 0. - plds = np.atleast_1d(metadata["PostLabelingDelay"]) + plds = np.atleast_1d(metadata['PostLabelingDelay']) # Get labeling efficiency (alpha in Alsop 2015). labeleff = estimate_labeling_efficiency(metadata=metadata) @@ -435,43 +435,43 @@ def _run_interface(self, runtime): # NiftiMasker.transform, until 0.12.0, so the arrays will currently be 2D no matter what. masker = maskers.NiftiMasker(mask_img=mask_file) deltam_arr = masker.fit_transform(deltam_file).T # Transpose to SxT - assert deltam_arr.ndim == 2, f"deltam is {deltam_arr.ndim}" + assert deltam_arr.ndim == 2, f'deltam is {deltam_arr.ndim}' # Load the M0 map and average over time, in case there's more than one map in the file. m0data = masker.transform(m0_file) m0data = np.mean(m0data, axis=0) scaled_m0data = m0_scale * m0data - self._results["plds"] = None - if "SliceTiming" in metadata: + self._results['plds'] = None + if 'SliceTiming' in metadata: # Offset PLD(s) by slice times # This step builds a voxel-wise array of post-labeling delay values, # where voxels from each slice have the appropriately-shifted PLD value. # If there are multiple PLDs, then the second dimension of the PLD array will # correspond to volumes in the time series. config.loggers.interface.info( - "2D acquisition with slice timing information detected. " - "Shifting post-labeling delay values across the brain by slice times." + '2D acquisition with slice timing information detected. ' + 'Shifting post-labeling delay values across the brain by slice times.' 
) - slice_times = np.array(metadata["SliceTiming"]) + slice_times = np.array(metadata['SliceTiming']) # Determine which axis slices come from. # ASL data typically acquires along z axis, from inferior to superior. - slice_encoding_direction = metadata.get("SliceEncodingDirection", "k") - slice_encoding_axis = "ijk".index(slice_encoding_direction[0]) + slice_encoding_direction = metadata.get('SliceEncodingDirection', 'k') + slice_encoding_axis = 'ijk'.index(slice_encoding_direction[0]) deltam_img = nb.load(deltam_file) shape = deltam_img.shape[:3] if slice_times.size != shape[slice_encoding_axis]: raise ValueError( - f"Number of slices ({shape[slice_encoding_axis]}) != " - f"slice times ({slice_times.size})" + f'Number of slices ({shape[slice_encoding_axis]}) != ' + f'slice times ({slice_times.size})' ) # Reverse the slice times if slices go from maximum index to zero. # This probably won't occur with ASL data though, since I --> S makes more sense than # S --> I. - if slice_encoding_direction.endswith("-"): + if slice_encoding_direction.endswith('-'): slice_times = slice_times[::-1] # Determine which dimensions to add to the slice times array, @@ -494,18 +494,18 @@ def _run_interface(self, runtime): # Write out the slice-shifted PLDs to the working directory, for debugging. pld_file = fname_presuffix( deltam_file, - suffix="_plds", + suffix='_plds', newpath=runtime.cwd, ) pld_img.to_filename(pld_file) - self._results["plds"] = pld_file + self._results['plds'] = pld_file elif is_multi_pld: # Broadcast PLDs to voxels by PLDs plds = np.dot(plds[:, None], np.ones((1, deltam_arr.shape[0]))).T if is_casl: - tau = np.array(metadata["LabelingDuration"]) + tau = np.array(metadata['LabelingDuration']) if is_multi_pld: if is_casl: @@ -524,49 +524,49 @@ def _run_interface(self, runtime): else: # Dai's approach can't be used on PASL data, so we'll need another method. raise ValueError( - "Multi-delay data are not supported for PASL sequences at the moment." + 'Multi-delay data are not supported for PASL sequences at the moment.' ) mean_cbf_img = masker.inverse_transform(mean_cbf) att_img = masker.inverse_transform(att) # Multi-delay data won't produce a CBF time series - self._results["cbf_ts"] = None - self._results["att"] = fname_presuffix( + self._results['cbf_ts'] = None + self._results['att'] = fname_presuffix( self.inputs.deltam, - suffix="_att", + suffix='_att', newpath=runtime.cwd, ) - att_img.to_filename(self._results["att"]) + att_img.to_filename(self._results['att']) else: # Single-delay if is_casl: denom_factor = t1blood * (1 - np.exp(-(tau / t1blood))) - elif not metadata["BolusCutOffFlag"]: + elif not metadata['BolusCutOffFlag']: raise ValueError( - "PASL without a bolus cut-off technique is not supported in ASLPrep." + 'PASL without a bolus cut-off technique is not supported in ASLPrep.' ) - elif metadata["BolusCutOffTechnique"] == "QUIPSS": + elif metadata['BolusCutOffTechnique'] == 'QUIPSS': # PASL + QUIPSS # Only one BolusCutOffDelayTime allowed. - assert isinstance(metadata["BolusCutOffDelayTime"], Number) - denom_factor = plds - metadata["BolusCutOffDelayTime"] # delta_TI, per Wong 1998 + assert isinstance(metadata['BolusCutOffDelayTime'], Number) + denom_factor = plds - metadata['BolusCutOffDelayTime'] # delta_TI, per Wong 1998 - elif metadata["BolusCutOffTechnique"] == "QUIPSSII": + elif metadata['BolusCutOffTechnique'] == 'QUIPSSII': # PASL + QUIPSSII # Per SD, use PLD as TI for PASL, so we will just use 'plds' in the numerator when # calculating the perfusion factor. 
# Only one BolusCutOffDelayTime allowed. - assert isinstance(metadata["BolusCutOffDelayTime"], Number) - denom_factor = metadata["BolusCutOffDelayTime"] # called TI1 in Alsop 2015 + assert isinstance(metadata['BolusCutOffDelayTime'], Number) + denom_factor = metadata['BolusCutOffDelayTime'] # called TI1 in Alsop 2015 - elif metadata["BolusCutOffTechnique"] == "Q2TIPS": + elif metadata['BolusCutOffTechnique'] == 'Q2TIPS': # PASL + Q2TIPS # Q2TIPS should have two BolusCutOffDelayTimes. - assert len(metadata["BolusCutOffDelayTime"]) == 2 - denom_factor = metadata["BolusCutOffDelayTime"][0] # called TI1 in Noguchi 2015 + assert len(metadata['BolusCutOffDelayTime']) == 2 + denom_factor = metadata['BolusCutOffDelayTime'][0] # called TI1 in Noguchi 2015 else: raise ValueError( @@ -575,8 +575,8 @@ def _run_interface(self, runtime): # Q2TIPS uses TI2 instead of w (PLD), see Noguchi 2015 for this info. exp_numerator = ( - metadata["BolusCutOffDelayTime"][1] - if metadata.get("BolusCutOffTechnique") == "Q2TIPS" + metadata['BolusCutOffDelayTime'][1] + if metadata.get('BolusCutOffTechnique') == 'Q2TIPS' else plds ) @@ -591,52 +591,52 @@ def _run_interface(self, runtime): cbf_ts = np.nan_to_num(cbf_ts, nan=0, posinf=0, neginf=0) cbf_ts_img = masker.inverse_transform(cbf_ts.T) mean_cbf_img = image.mean_img(cbf_ts_img) - self._results["cbf_ts"] = fname_presuffix( + self._results['cbf_ts'] = fname_presuffix( self.inputs.deltam, - suffix="_cbf", + suffix='_cbf', newpath=runtime.cwd, ) - cbf_ts_img.to_filename(self._results["cbf_ts"]) + cbf_ts_img.to_filename(self._results['cbf_ts']) # Single-delay data won't produce an ATT image - self._results["att"] = None + self._results['att'] = None # Mean CBF is returned no matter what - self._results["mean_cbf"] = fname_presuffix( + self._results['mean_cbf'] = fname_presuffix( self.inputs.deltam, - suffix="_meancbf", + suffix='_meancbf', newpath=runtime.cwd, ) - mean_cbf_img.to_filename(self._results["mean_cbf"]) + mean_cbf_img.to_filename(self._results['mean_cbf']) return runtime class _ScoreAndScrubCBFInputSpec(BaseInterfaceInputSpec): - cbf_ts = File(exists=True, mandatory=True, desc="Computed CBF from ComputeCBF.") - mask = File(exists=True, mandatory=True, desc="mask") - gm_tpm = File(exists=True, mandatory=True, desc="Gray matter tissue probability map.") - wm_tpm = File(exists=True, mandatory=True, desc="White matter tissue probability map.") - csf_tpm = File(exists=True, mandatory=True, desc="CSF tissue probability map.") + cbf_ts = File(exists=True, mandatory=True, desc='Computed CBF from ComputeCBF.') + mask = File(exists=True, mandatory=True, desc='mask') + gm_tpm = File(exists=True, mandatory=True, desc='Gray matter tissue probability map.') + wm_tpm = File(exists=True, mandatory=True, desc='White matter tissue probability map.') + csf_tpm = File(exists=True, mandatory=True, desc='CSF tissue probability map.') tpm_threshold = traits.Float( default_value=0.7, usedefault=True, mandatory=False, - desc="Tissue probability threshold for binarizing GM, WM, and CSF masks.", + desc='Tissue probability threshold for binarizing GM, WM, and CSF masks.', ) wavelet_function = traits.Str( - default_value="huber", + default_value='huber', usedefault=True, mandatory=False, - option=["bisquare", "andrews", "cauchy", "fair", "logistics", "ols", "talwar", "welsch"], - desc="Wavelet function", + option=['bisquare', 'andrews', 'cauchy', 'fair', 'logistics', 'ols', 'talwar', 'welsch'], + desc='Wavelet function', ) class _ScoreAndScrubCBFOutputSpec(TraitedSpec): - cbf_ts_score 
= File(exists=False, mandatory=False, desc="score timeseries data") - mean_cbf_score = File(exists=False, mandatory=False, desc="average score") - mean_cbf_scrub = File(exists=False, mandatory=False, desc="average scrub") - score_outlier_index = File(exists=False, mandatory=False, desc="index of volume remove ") + cbf_ts_score = File(exists=False, mandatory=False, desc='score timeseries data') + mean_cbf_score = File(exists=False, mandatory=False, desc='average score') + mean_cbf_scrub = File(exists=False, mandatory=False, desc='average scrub') + score_outlier_index = File(exists=False, mandatory=False, desc='index of volume remove ') class ScoreAndScrubCBF(SimpleInterface): @@ -684,31 +684,31 @@ def _run_interface(self, runtime): mean_cbf_score = np.mean(cbf_scorets, axis=3) else: config.loggers.interface.warning( - f"CBF time series is only {cbf_ts.ndim}D. Skipping SCORE and SCRUB." + f'CBF time series is only {cbf_ts.ndim}D. Skipping SCORE and SCRUB.' ) cbf_scorets = cbf_ts index_score = np.array([0]) cbfscrub = cbf_ts mean_cbf_score = cbf_ts - self._results["cbf_ts_score"] = fname_presuffix( + self._results['cbf_ts_score'] = fname_presuffix( self.inputs.cbf_ts, - suffix="_cbfscorets", + suffix='_cbfscorets', newpath=runtime.cwd, ) - self._results["mean_cbf_score"] = fname_presuffix( + self._results['mean_cbf_score'] = fname_presuffix( self.inputs.cbf_ts, - suffix="_meancbfscore", + suffix='_meancbfscore', newpath=runtime.cwd, ) - self._results["mean_cbf_scrub"] = fname_presuffix( + self._results['mean_cbf_scrub'] = fname_presuffix( self.inputs.cbf_ts, - suffix="_cbfscrub", + suffix='_cbfscrub', newpath=runtime.cwd, ) - self._results["score_outlier_index"] = fname_presuffix( + self._results['score_outlier_index'] = fname_presuffix( self.inputs.cbf_ts, - suffix="_scoreindex.tsv", + suffix='_scoreindex.tsv', newpath=runtime.cwd, use_ext=False, ) @@ -718,20 +718,20 @@ def _run_interface(self, runtime): dataobj=cbf_scorets, affine=samplecbf.affine, header=samplecbf.header, - ).to_filename(self._results["cbf_ts_score"]) + ).to_filename(self._results['cbf_ts_score']) nb.Nifti1Image( dataobj=mean_cbf_score, affine=samplecbf.affine, header=samplecbf.header, - ).to_filename(self._results["mean_cbf_score"]) + ).to_filename(self._results['mean_cbf_score']) nb.Nifti1Image( dataobj=cbfscrub, affine=samplecbf.affine, header=samplecbf.header, - ).to_filename(self._results["mean_cbf_scrub"]) + ).to_filename(self._results['mean_cbf_scrub']) - score_outlier_df = pd.DataFrame(columns=["score_outlier_index"], data=index_score) - score_outlier_df.to_csv(self._results["score_outlier_index"], sep="\t", index=False) + score_outlier_df = pd.DataFrame(columns=['score_outlier_index'], data=index_score) + score_outlier_df.to_csv(self._results['score_outlier_index'], sep='\t', index=False) return runtime @@ -742,106 +742,106 @@ class _BASILCBFInputSpec(FSLCommandInputSpec): deltam = File( exists=True, desc=( - "ASL data after subtracting tag-control or control-tag. " - "This matches with ``--iaf diff``, which is the default." + 'ASL data after subtracting tag-control or control-tag. ' + 'This matches with ``--iaf diff``, which is the default.' 
), - argstr="-i %s", + argstr='-i %s', position=0, mandatory=True, ) mask = File( exists=True, - argstr="-m %s", - desc="mask in the same space as deltam", + argstr='-m %s', + desc='mask in the same space as deltam', mandatory=True, ) - mzero = File(exists=True, argstr="-c %s", desc="m0 scan", mandatory=False) - m0_scale = traits.Float(desc="calibration of asl", argstr="--cgain %.2f", mandatory=True) + mzero = File(exists=True, argstr='-c %s', desc='m0 scan', mandatory=False) + m0_scale = traits.Float(desc='calibration of asl', argstr='--cgain %.2f', mandatory=True) m0tr = traits.Float( - desc="The repetition time for the calibration image (the M0 scan).", - argstr="--tr %.2f", + desc='The repetition time for the calibration image (the M0 scan).', + argstr='--tr %.2f', mandatory=False, ) tis = traits.Either( traits.Float(), traits.List(traits.Float()), desc=( - "The list of inflow times (TIs), a comma separated list of values should be provided " - "(that matches the order in the data).\n\n" - "Note, the inflow time is the PLD plus bolus duration for pcASL (and cASL), " - "it equals the inversion time for pASL. " - "If the data contains multiple repeats of the same set of TIs then it is only " - "necessary to list the unique TIs.\n\n" - "When using the ``--tis=`` you can specify a full list of all TIs/PLDs in the data " - "(i.e., as many entries as there are label-control pairs). " - "Or, if you have a number of TIs/PLDs repeated multiple times you can just list the " - "unique TIs in order and ``oxford_asl`` will automatically replicate that list to " - "match the number of repeated measurements in the data. " - "If you have a variable number of repeats at each TI/PLD then either list all TIs " - "or use the ``--rpts=`` option (see below)." + 'The list of inflow times (TIs), a comma separated list of values should be provided ' + '(that matches the order in the data).\n\n' + 'Note, the inflow time is the PLD plus bolus duration for pcASL (and cASL), ' + 'it equals the inversion time for pASL. ' + 'If the data contains multiple repeats of the same set of TIs then it is only ' + 'necessary to list the unique TIs.\n\n' + 'When using the ``--tis=`` you can specify a full list of all TIs/PLDs in the data ' + '(i.e., as many entries as there are label-control pairs). ' + 'Or, if you have a number of TIs/PLDs repeated multiple times you can just list the ' + 'unique TIs in order and ``oxford_asl`` will automatically replicate that list to ' + 'match the number of repeated measurements in the data. ' + 'If you have a variable number of repeats at each TI/PLD then either list all TIs ' + 'or use the ``--rpts=`` option (see below).' ), - argstr="--tis %s", + argstr='--tis %s', mandatory=True, - sep=",", + sep=',', ) pcasl = traits.Bool( desc=( - "Data were acquired using cASL or pcASL labelling " - "(pASL labeling is assumed by default)." + 'Data were acquired using cASL or pcASL labelling ' + '(pASL labeling is assumed by default).' 
), - argstr="--casl", + argstr='--casl', mandatory=False, default_value=False, ) bolus = traits.Either( traits.Float(), traits.List(traits.Float()), - desc="bolus or tau: label duration", - argstr="--bolus %s", + desc='bolus or tau: label duration', + argstr='--bolus %s', mandatory=True, - sep=",", + sep=',', ) slice_spacing = traits.Float( - desc="Slice times", - argstr="--slicedt %s", + desc='Slice times', + argstr='--slicedt %s', mandatory=False, ) pvc = traits.Bool( - desc="Do partial volume correction.", + desc='Do partial volume correction.', mandatory=False, - argstr="--pvcorr", + argstr='--pvcorr', default_value=True, ) gm_tpm = File( exists=True, mandatory=False, - desc="Partial volume estimates for GM. This is just a GM tissue probability map.", - argstr="--pvgm %s", + desc='Partial volume estimates for GM. This is just a GM tissue probability map.', + argstr='--pvgm %s', ) wm_tpm = File( exists=True, mandatory=False, - desc="Partial volume estimates for WM. This is just a WM tissue probability map.", - argstr="--pvwm %s", + desc='Partial volume estimates for WM. This is just a WM tissue probability map.', + argstr='--pvwm %s', ) alpha = traits.Float( desc=( "Inversion efficiency - [default: 0.98 (pASL); 0.85 (cASL)]. " "This is equivalent to the BIDS metadata field 'LabelingEfficiency'." ), - argstr="--alpha %.2f", + argstr='--alpha %.2f', ) - out_basename = File(desc="base name of output files", argstr="-o %s", mandatory=True) + out_basename = File(desc='base name of output files', argstr='-o %s', mandatory=True) class _BASILCBFOutputSpec(TraitedSpec): - mean_cbf_basil = File(exists=True, desc="cbf with spatial correction") - mean_cbf_gm_basil = File(exists=True, desc="cbf with spatial correction") + mean_cbf_basil = File(exists=True, desc='cbf with spatial correction') + mean_cbf_gm_basil = File(exists=True, desc='cbf with spatial correction') mean_cbf_wm_basil = File( exists=True, - desc="cbf with spatial partial volume white matter correction", + desc='cbf with spatial partial volume white matter correction', ) - att_basil = File(exists=True, desc="arterial transit time") + att_basil = File(exists=True, desc='arterial transit time') class BASILCBF(FSLCommand): @@ -856,7 +856,7 @@ class BASILCBF(FSLCommand): See https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BASIL and https://asl-docs.readthedocs.io. 
""" - _cmd = "oxford_asl" + _cmd = 'oxford_asl' input_spec = _BASILCBFInputSpec output_spec = _BASILCBFOutputSpec @@ -874,15 +874,15 @@ def _list_outputs(self): outputs = self.output_spec().get() - outputs["mean_cbf_basil"] = os.path.join(basename, "native_space/perfusion_calib.nii.gz") - outputs["att_basil"] = os.path.join(basename, "native_space/arrival.nii.gz") - outputs["mean_cbf_gm_basil"] = os.path.join( + outputs['mean_cbf_basil'] = os.path.join(basename, 'native_space/perfusion_calib.nii.gz') + outputs['att_basil'] = os.path.join(basename, 'native_space/arrival.nii.gz') + outputs['mean_cbf_gm_basil'] = os.path.join( basename, - "native_space/pvcorr/perfusion_calib.nii.gz", + 'native_space/pvcorr/perfusion_calib.nii.gz', ) - outputs["mean_cbf_wm_basil"] = os.path.join( + outputs['mean_cbf_wm_basil'] = os.path.join( basename, - "native_space/pvcorr/perfusion_wm_calib.nii.gz", + 'native_space/pvcorr/perfusion_wm_calib.nii.gz', ) return outputs diff --git a/aslprep/interfaces/confounds.py b/aslprep/interfaces/confounds.py index dddd35af6..b382d5f7f 100644 --- a/aslprep/interfaces/confounds.py +++ b/aslprep/interfaces/confounds.py @@ -29,17 +29,17 @@ class _GatherConfoundsInputSpec(BaseInterfaceInputSpec): - signals = File(exists=True, desc="input signals") - dvars = File(exists=True, desc="file containing DVARS") - rmsd = File(exists=True, desc="input RMS framewise displacement") - std_dvars = File(exists=True, desc="file containing standardized DVARS") - fd = File(exists=True, desc="input framewise displacement") - motion = File(exists=True, desc="input motion parameters") + signals = File(exists=True, desc='input signals') + dvars = File(exists=True, desc='file containing DVARS') + rmsd = File(exists=True, desc='input RMS framewise displacement') + std_dvars = File(exists=True, desc='file containing standardized DVARS') + fd = File(exists=True, desc='input framewise displacement') + motion = File(exists=True, desc='input motion parameters') class _GatherConfoundsOutputSpec(TraitedSpec): - confounds_file = File(exists=True, desc="output confounds file") - confounds_list = traits.List(traits.Str, desc="list of headers") + confounds_file = File(exists=True, desc='output confounds file') + confounds_list = traits.List(traits.Str, desc='list of headers') class GatherConfounds(SimpleInterface): @@ -58,19 +58,19 @@ def _run_interface(self, runtime): motion=self.inputs.motion, newpath=runtime.cwd, ) - self._results["confounds_file"] = combined_out - self._results["confounds_list"] = confounds_list + self._results['confounds_file'] = combined_out + self._results['confounds_list'] = confounds_list return runtime class _GatherCBFConfoundsInputSpec(BaseInterfaceInputSpec): - signals = File(exists=True, desc="input signals") - score = File(exists=True, desc="SCORE outlier index") + signals = File(exists=True, desc='input signals') + score = File(exists=True, desc='SCORE outlier index') class _GatherCBFConfoundsOutputSpec(TraitedSpec): - confounds_file = File(exists=True, desc="output confounds file") - confounds_list = traits.List(traits.Str, desc="list of headers") + confounds_file = File(exists=True, desc='output confounds file') + confounds_list = traits.List(traits.Str, desc='list of headers') class GatherCBFConfounds(SimpleInterface): @@ -90,18 +90,18 @@ def _run_interface(self, runtime): score=self.inputs.score, newpath=runtime.cwd, ) - self._results["confounds_file"] = combined_out - self._results["confounds_list"] = confounds_list + self._results['confounds_file'] = combined_out + 
self._results['confounds_list'] = confounds_list return runtime class _NormalizeMotionParamsInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="the input parameters file") - format = traits.Enum("FSL", "AFNI", "FSFAST", "NIPY", usedefault=True, desc="output format") + in_file = File(exists=True, mandatory=True, desc='the input parameters file') + format = traits.Enum('FSL', 'AFNI', 'FSFAST', 'NIPY', usedefault=True, desc='output format') class _NormalizeMotionParamsOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="written file path") + out_file = File(exists=True, desc='written file path') class NormalizeMotionParams(SimpleInterface): @@ -125,8 +125,8 @@ def _run_interface(self, runtime): mpars = np.apply_along_axis( func1d=normalize_mc_params, axis=1, arr=mpars, source=self.inputs.format ) - self._results["out_file"] = os.path.join(runtime.cwd, "motion_params.txt") - np.savetxt(self._results["out_file"], mpars) + self._results['out_file'] = os.path.join(runtime.cwd, 'motion_params.txt') + np.savetxt(self._results['out_file'], mpars) return runtime @@ -134,49 +134,49 @@ class _ComputeCBFQCInputSpec(BaseInterfaceInputSpec): name_source = File( exists=True, mandatory=True, - desc="Original asl_file. Used to extract entity information.", + desc='Original asl_file. Used to extract entity information.', ) - mean_cbf = File(exists=True, mandatory=True, desc="Mean CBF from standard CBF calculation.") + mean_cbf = File(exists=True, mandatory=True, desc='Mean CBF from standard CBF calculation.') # SCORE/SCRUB inputs - mean_cbf_score = File(exists=True, mandatory=False, desc="Mean CBF after SCORE censoring.") - mean_cbf_scrub = File(exists=True, mandatory=False, desc="Mean CBF after SCRUB denoising.") + mean_cbf_score = File(exists=True, mandatory=False, desc='Mean CBF after SCORE censoring.') + mean_cbf_scrub = File(exists=True, mandatory=False, desc='Mean CBF after SCRUB denoising.') # BASIL inputs - mean_cbf_basil = File(exists=True, mandatory=False, desc="Mean CBF produced by BASIL.") + mean_cbf_basil = File(exists=True, mandatory=False, desc='Mean CBF produced by BASIL.') mean_cbf_gm_basil = File( exists=True, mandatory=False, - desc="GM partial volume corrected CBF with BASIL.", + desc='GM partial volume corrected CBF with BASIL.', ) # Tissue probability maps and masks - gm_tpm = File(exists=True, mandatory=True, desc="Gray matter tissue probability map") - wm_tpm = File(exists=True, mandatory=True, desc="White matter tissue probability map") - csf_tpm = File(exists=True, mandatory=True, desc="CSF tissue probability map") - asl_mask = File(exists=True, mandatory=True, desc="ASL mask in native ASL reference space") - t1w_mask = File(exists=True, mandatory=True, desc="T1w mask in native space") - asl_mask_std = File(exists=True, mandatory=False, desc="ASL mask in standard space") - template_mask = File(exists=True, mandatory=False, desc="template mask or image") + gm_tpm = File(exists=True, mandatory=True, desc='Gray matter tissue probability map') + wm_tpm = File(exists=True, mandatory=True, desc='White matter tissue probability map') + csf_tpm = File(exists=True, mandatory=True, desc='CSF tissue probability map') + asl_mask = File(exists=True, mandatory=True, desc='ASL mask in native ASL reference space') + t1w_mask = File(exists=True, mandatory=True, desc='T1w mask in native space') + asl_mask_std = File(exists=True, mandatory=False, desc='ASL mask in standard space') + template_mask = File(exists=True, mandatory=False, desc='template mask or 
image') tpm_threshold = traits.Float( default_value=0.7, usedefault=True, mandatory=False, - desc="Tissue probability threshold for binarizing GM, WM, and CSF masks.", + desc='Tissue probability threshold for binarizing GM, WM, and CSF masks.', ) # Non-GE-only inputs confounds_file = File( exists=True, mandatory=False, - desc="Confounds file. Will not be defined for GE data.", + desc='Confounds file. Will not be defined for GE data.', ) rmsd_file = File( exists=True, mandatory=False, - desc="RMSD file. Will not be defined for GE data.", + desc='RMSD file. Will not be defined for GE data.', ) class _ComputeCBFQCOutputSpec(TraitedSpec): - qc_file = File(exists=True, desc="qc file") - qc_metadata = File(exists=True, desc="qc metadata") + qc_file = File(exists=True, desc='qc file') + qc_metadata = File(exists=True, desc='qc metadata') class ComputeCBFQC(SimpleInterface): @@ -195,9 +195,9 @@ def _run_interface(self, runtime): confounds_df = pd.read_table(self.inputs.confounds_file) confounds_df.fillna(0, inplace=True) - if "framewise_displacement" in confounds_df.columns: + if 'framewise_displacement' in confounds_df.columns: # FD and RMSD only available for multi-volume datasets - mean_fd = np.mean(confounds_df["framewise_displacement"]) + mean_fd = np.mean(confounds_df['framewise_displacement']) mean_rms = pd.read_csv(self.inputs.rmsd_file, header=None).mean().values[0] else: mean_fd = np.nan @@ -257,7 +257,7 @@ def _run_interface(self, runtime): thresh=thresh, ) else: - print("no score inputs, setting to np.nan") + print('no score inputs, setting to np.nan') qei_cbf_score = np.nan qei_cbf_scrub = np.nan percentage_negative_cbf_score = np.nan @@ -289,7 +289,7 @@ def _run_interface(self, runtime): thresh=thresh, ) else: - print("no basil inputs, setting to np.nan") + print('no basil inputs, setting to np.nan') qei_cbf_basil = np.nan qei_cbf_basil_gm = np.nan percentage_negative_cbf_basil = np.nan @@ -303,219 +303,219 @@ def _run_interface(self, runtime): ) metrics_dict = { - "mean_fd": [mean_fd], - "rmsd": [mean_rms], - "coreg_dice": [coreg_dice], - "coreg_correlation": [coreg_correlation], - "coreg_overlap": [coreg_overlap], - "qei_cbf": [qei_cbf], - "qei_cbf_score": [qei_cbf_score], - "qei_cbf_scrub": [qei_cbf_scrub], - "qei_cbf_basil": [qei_cbf_basil], - "qei_cbf_basil_gm": [qei_cbf_basil_gm], - "mean_gm_cbf": [mean_cbf_mean[0]], - "mean_wm_cbf": [mean_cbf_mean[1]], - "ratio_gm_wm_cbf": [ratio_gm_wm_cbf], - "percentage_negative_cbf": [percentage_negative_cbf], - "percentage_negative_cbf_score": [percentage_negative_cbf_score], - "percentage_negative_cbf_scrub": [percentage_negative_cbf_scrub], - "percentage_negative_cbf_basil": [percentage_negative_cbf_basil], - "percentage_negative_cbf_basil_gm": [percentage_negative_cbf_basil_gm], + 'mean_fd': [mean_fd], + 'rmsd': [mean_rms], + 'coreg_dice': [coreg_dice], + 'coreg_correlation': [coreg_correlation], + 'coreg_overlap': [coreg_overlap], + 'qei_cbf': [qei_cbf], + 'qei_cbf_score': [qei_cbf_score], + 'qei_cbf_scrub': [qei_cbf_scrub], + 'qei_cbf_basil': [qei_cbf_basil], + 'qei_cbf_basil_gm': [qei_cbf_basil_gm], + 'mean_gm_cbf': [mean_cbf_mean[0]], + 'mean_wm_cbf': [mean_cbf_mean[1]], + 'ratio_gm_wm_cbf': [ratio_gm_wm_cbf], + 'percentage_negative_cbf': [percentage_negative_cbf], + 'percentage_negative_cbf_score': [percentage_negative_cbf_score], + 'percentage_negative_cbf_scrub': [percentage_negative_cbf_scrub], + 'percentage_negative_cbf_basil': [percentage_negative_cbf_basil], + 'percentage_negative_cbf_basil_gm': 
[percentage_negative_cbf_basil_gm], } qc_metadata = { - "mean_fd": { - "LongName": "Mean Framewise Displacement", - "Description": ( - "Average framewise displacement without any motion parameter filtering. " - "This value includes high-motion outliers, but not dummy volumes. " - "FD is calculated according to the Power definition." + 'mean_fd': { + 'LongName': 'Mean Framewise Displacement', + 'Description': ( + 'Average framewise displacement without any motion parameter filtering. ' + 'This value includes high-motion outliers, but not dummy volumes. ' + 'FD is calculated according to the Power definition.' ), - "Units": "mm", - "Term URL": "https://doi.org/10.1016/j.neuroimage.2011.10.018", + 'Units': 'mm', + 'Term URL': 'https://doi.org/10.1016/j.neuroimage.2011.10.018', }, - "rmsd": { - "LongName": "Mean Relative Root Mean Squared", - "Description": ( + 'rmsd': { + 'LongName': 'Mean Relative Root Mean Squared', + 'Description': ( "Average relative root mean squared calculated from motion parameters, " "after removal of dummy volumes and high-motion outliers. " "Relative in this case means 'relative to the previous scan'." ), - "Units": "arbitrary", + 'Units': 'arbitrary', }, - "coreg_dice": { - "LongName": "Coregistration Sørensen-Dice Coefficient", - "Description": ( - "The Sørensen-Dice coefficient calculated between the binary brain masks from " - "the coregistered anatomical and ASL reference images. " - "Values are bounded between 0 and 1, " - "with higher values indicating better coregistration." + 'coreg_dice': { + 'LongName': 'Coregistration Sørensen-Dice Coefficient', + 'Description': ( + 'The Sørensen-Dice coefficient calculated between the binary brain masks from ' + 'the coregistered anatomical and ASL reference images. ' + 'Values are bounded between 0 and 1, ' + 'with higher values indicating better coregistration.' ), - "Term URL": "https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient", + 'Term URL': 'https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient', }, - "coreg_jaccard": { - "LongName": "Coregistration Jaccard Index", - "Description": ( - "The Jaccard index calculated between the binary brain masks from " - "the coregistered anatomical and ASL reference images. " - "Values are bounded between 0 and 1, " - "with higher values indicating better coregistration." + 'coreg_jaccard': { + 'LongName': 'Coregistration Jaccard Index', + 'Description': ( + 'The Jaccard index calculated between the binary brain masks from ' + 'the coregistered anatomical and ASL reference images. ' + 'Values are bounded between 0 and 1, ' + 'with higher values indicating better coregistration.' ), - "Term URL": "https://en.wikipedia.org/wiki/Jaccard_index", + 'Term URL': 'https://en.wikipedia.org/wiki/Jaccard_index', }, - "coreg_correlation": { - "LongName": "Coregistration Pearson Correlation", - "Description": ( - "The Pearson correlation coefficient calculated between the binary brain " - "masks from the coregistered anatomical and ASL reference images. " - "Values are bounded between -1 and 1, " - "with higher values indicating better coregistration." + 'coreg_correlation': { + 'LongName': 'Coregistration Pearson Correlation', + 'Description': ( + 'The Pearson correlation coefficient calculated between the binary brain ' + 'masks from the coregistered anatomical and ASL reference images. ' + 'Values are bounded between -1 and 1, ' + 'with higher values indicating better coregistration.' 
), - "Term URL": "https://en.wikipedia.org/wiki/Pearson_correlation_coefficient", + 'Term URL': 'https://en.wikipedia.org/wiki/Pearson_correlation_coefficient', }, - "coreg_overlap": { - "LongName": "Coregistration Overlap Coefficient", - "Description": ( - "The Szymkiewicz-Simpson overlap coefficient calculated between the binary " - "brain masks from the coregistered anatomical and ASL reference images. " - "Higher values indicate better normalization." + 'coreg_overlap': { + 'LongName': 'Coregistration Overlap Coefficient', + 'Description': ( + 'The Szymkiewicz-Simpson overlap coefficient calculated between the binary ' + 'brain masks from the coregistered anatomical and ASL reference images. ' + 'Higher values indicate better normalization.' ), - "Term URL": "https://en.wikipedia.org/wiki/Overlap_coefficient", + 'Term URL': 'https://en.wikipedia.org/wiki/Overlap_coefficient', }, - "qei_cbf": { - "LongName": "Cerebral Blood Flow Quality Evaluation Index", - "Description": "QEI calculated on mean CBF image.", - "Term URL": "http://indexsmart.mirasmart.com/ISMRM2017/PDFfiles/0682.html", + 'qei_cbf': { + 'LongName': 'Cerebral Blood Flow Quality Evaluation Index', + 'Description': 'QEI calculated on mean CBF image.', + 'Term URL': 'http://indexsmart.mirasmart.com/ISMRM2017/PDFfiles/0682.html', }, - "qei_cbf_score": { - "LongName": "SCORE-Denoised Cerebral Blood Flow Quality Evaluation Index", - "Description": "QEI calculated on mean SCORE-denoised CBF image.", - "Term URL": "http://indexsmart.mirasmart.com/ISMRM2017/PDFfiles/0682.html", + 'qei_cbf_score': { + 'LongName': 'SCORE-Denoised Cerebral Blood Flow Quality Evaluation Index', + 'Description': 'QEI calculated on mean SCORE-denoised CBF image.', + 'Term URL': 'http://indexsmart.mirasmart.com/ISMRM2017/PDFfiles/0682.html', }, - "qei_cbf_scrub": { - "LongName": "SCRUB-Denoised Cerebral Blood Flow Quality Evaluation Index", - "Description": "QEI calculated on mean SCRUB-denoised CBF image.", - "Term URL": "http://indexsmart.mirasmart.com/ISMRM2017/PDFfiles/0682.html", + 'qei_cbf_scrub': { + 'LongName': 'SCRUB-Denoised Cerebral Blood Flow Quality Evaluation Index', + 'Description': 'QEI calculated on mean SCRUB-denoised CBF image.', + 'Term URL': 'http://indexsmart.mirasmart.com/ISMRM2017/PDFfiles/0682.html', }, - "qei_cbf_basil": { - "LongName": "BASIL Cerebral Blood Flow Quality Evaluation Index", - "Description": "QEI calculated on CBF image produced by BASIL.", - "Term URL": "http://indexsmart.mirasmart.com/ISMRM2017/PDFfiles/0682.html", + 'qei_cbf_basil': { + 'LongName': 'BASIL Cerebral Blood Flow Quality Evaluation Index', + 'Description': 'QEI calculated on CBF image produced by BASIL.', + 'Term URL': 'http://indexsmart.mirasmart.com/ISMRM2017/PDFfiles/0682.html', }, - "qei_cbf_basil_gm": { - "LongName": ( - "BASIL Partial Volume Corrected Cerebral Blood Flow Quality Evaluation Index" + 'qei_cbf_basil_gm': { + 'LongName': ( + 'BASIL Partial Volume Corrected Cerebral Blood Flow Quality Evaluation Index' ), - "Description": ( - "QEI calculated on partial volume-corrected CBF image produced by BASIL." + 'Description': ( + 'QEI calculated on partial volume-corrected CBF image produced by BASIL.' 
), - "Term URL": "http://indexsmart.mirasmart.com/ISMRM2017/PDFfiles/0682.html", + 'Term URL': 'http://indexsmart.mirasmart.com/ISMRM2017/PDFfiles/0682.html', }, - "mean_gm_cbf": { - "LongName": "Mean Cerebral Blood Flow of Gray Matter", - "Description": "Mean CBF value of gray matter.", - "Units": "mL/100 g/min", + 'mean_gm_cbf': { + 'LongName': 'Mean Cerebral Blood Flow of Gray Matter', + 'Description': 'Mean CBF value of gray matter.', + 'Units': 'mL/100 g/min', }, - "mean_wm_cbf": { - "LongName": "Mean Cerebral Blood Flow of White Matter", - "Description": "Mean CBF value of white matter.", - "Units": "mL/100 g/min", + 'mean_wm_cbf': { + 'LongName': 'Mean Cerebral Blood Flow of White Matter', + 'Description': 'Mean CBF value of white matter.', + 'Units': 'mL/100 g/min', }, - "ratio_gm_wm_cbf": { - "LongName": "Mean Gray Matter-White Matter Cerebral Blood Flow Ratio", - "Description": ( - "The ratio between the mean gray matter and mean white matter CBF values." + 'ratio_gm_wm_cbf': { + 'LongName': 'Mean Gray Matter-White Matter Cerebral Blood Flow Ratio', + 'Description': ( + 'The ratio between the mean gray matter and mean white matter CBF values.' ), }, - "percentage_negative_cbf": { - "LongName": "Percentage of Negative Cerebral Blood Flow Values", - "Description": ( - "Percentage of negative CBF values, calculated on the mean CBF image." + 'percentage_negative_cbf': { + 'LongName': 'Percentage of Negative Cerebral Blood Flow Values', + 'Description': ( + 'Percentage of negative CBF values, calculated on the mean CBF image.' ), - "Units": "percent", + 'Units': 'percent', }, - "percentage_negative_cbf_score": { - "LongName": "Percentage of Negative SCORE-Denoised Cerebral Blood Flow Values", - "Description": ( - "Percentage of negative CBF values, calculated on the SCORE-denoised " - "CBF image." + 'percentage_negative_cbf_score': { + 'LongName': 'Percentage of Negative SCORE-Denoised Cerebral Blood Flow Values', + 'Description': ( + 'Percentage of negative CBF values, calculated on the SCORE-denoised ' + 'CBF image.' ), - "Units": "percent", + 'Units': 'percent', }, - "percentage_negative_cbf_scrub": { - "LongName": "Percentage of Negative SCRUB-Denoised Cerebral Blood Flow Values", - "Description": ( - "Percentage of negative CBF values, calculated on the SCRUB-denoised " - "CBF image." + 'percentage_negative_cbf_scrub': { + 'LongName': 'Percentage of Negative SCRUB-Denoised Cerebral Blood Flow Values', + 'Description': ( + 'Percentage of negative CBF values, calculated on the SCRUB-denoised ' + 'CBF image.' ), - "Units": "percent", + 'Units': 'percent', }, - "percentage_negative_cbf_basil": { - "LongName": "Percentage of Negative BASIL Cerebral Blood Flow Values", - "Description": ( - "Percentage of negative CBF values, calculated on CBF image produced by BASIL." + 'percentage_negative_cbf_basil': { + 'LongName': 'Percentage of Negative BASIL Cerebral Blood Flow Values', + 'Description': ( + 'Percentage of negative CBF values, calculated on CBF image produced by BASIL.' ), - "Units": "percent", + 'Units': 'percent', }, - "percentage_negative_cbf_basil_gm": { - "LongName": ( - "Percentage of Negative BASIL Partial Volume Corrected Cerebral Blood Flow " - "Values" + 'percentage_negative_cbf_basil_gm': { + 'LongName': ( + 'Percentage of Negative BASIL Partial Volume Corrected Cerebral Blood Flow ' + 'Values' ), - "Description": ( - "Percentage of negative CBF values, calculated on partial volume-corrected " - "CBF image produced by BASIL." 
+ 'Description': ( + 'Percentage of negative CBF values, calculated on partial volume-corrected ' + 'CBF image produced by BASIL.' ), - "Units": "percent", + 'Units': 'percent', }, } if self.inputs.asl_mask_std and self.inputs.template_mask: metrics_dict.update( { - "norm_dice": [norm_dice], - "norm_correlation": [norm_correlation], - "norm_overlap": [norm_overlap], + 'norm_dice': [norm_dice], + 'norm_correlation': [norm_correlation], + 'norm_overlap': [norm_overlap], } ) qc_metadata.update( { - "norm_dice": { - "LongName": "Normalization Sørensen-Dice Coefficient", - "Description": ( - "The Sørensen-Dice coefficient calculated between the binary brain " - "masks from the normalized ASL reference image and the associated " - "template. " - "Values are bounded between 0 and 1, " - "with higher values indicating better normalization." + 'norm_dice': { + 'LongName': 'Normalization Sørensen-Dice Coefficient', + 'Description': ( + 'The Sørensen-Dice coefficient calculated between the binary brain ' + 'masks from the normalized ASL reference image and the associated ' + 'template. ' + 'Values are bounded between 0 and 1, ' + 'with higher values indicating better normalization.' ), - "Term URL": ( - "https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient" + 'Term URL': ( + 'https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient' ), }, - "norm_correlation": { - "LongName": "Normalization Pearson Correlation", - "Description": ( - "The Pearson correlation coefficient calculated between the binary " - "brain masks from the normalized ASL reference image and the " - "associated template. " - "Values are bounded between -1 and 1, " - "with higher values indicating better coregistration." + 'norm_correlation': { + 'LongName': 'Normalization Pearson Correlation', + 'Description': ( + 'The Pearson correlation coefficient calculated between the binary ' + 'brain masks from the normalized ASL reference image and the ' + 'associated template. ' + 'Values are bounded between -1 and 1, ' + 'with higher values indicating better coregistration.' ), - "Term URL": ( - "https://en.wikipedia.org/wiki/Pearson_correlation_coefficient" + 'Term URL': ( + 'https://en.wikipedia.org/wiki/Pearson_correlation_coefficient' ), }, - "norm_overlap": { - "LongName": "Normalization Overlap Coefficient", - "Description": ( - "The Szymkiewicz-Simpson overlap coefficient calculated between the " - "binary brain masks from the normalized ASL reference image and the " - "associated template. " - "Higher values indicate better normalization." + 'norm_overlap': { + 'LongName': 'Normalization Overlap Coefficient', + 'Description': ( + 'The Szymkiewicz-Simpson overlap coefficient calculated between the ' + 'binary brain masks from the normalized ASL reference image and the ' + 'associated template. ' + 'Higher values indicate better normalization.' ), - "Term URL": "https://en.wikipedia.org/wiki/Overlap_coefficient", + 'Term URL': 'https://en.wikipedia.org/wiki/Overlap_coefficient', }, } ) @@ -523,28 +523,28 @@ def _run_interface(self, runtime): # Extract entities from the input file. # Useful for identifying ASL files after concatenating the QC files across runs. base_file = os.path.basename(self.inputs.name_source) - entities = base_file.split("_")[:-1] - entities_dict = {ent.split("-")[0]: ent.split("-")[1] for ent in entities} + entities = base_file.split('_')[:-1] + entities_dict = {ent.split('-')[0]: ent.split('-')[1] for ent in entities} # Combine the dictionaries and convert to a DataFrame. 
qc_dict = {**entities_dict, **metrics_dict} qc_df = pd.DataFrame(qc_dict) - self._results["qc_file"] = fname_presuffix( + self._results['qc_file'] = fname_presuffix( self.inputs.mean_cbf, - suffix="qc_cbf.tsv", + suffix='qc_cbf.tsv', newpath=runtime.cwd, use_ext=False, ) - qc_df.to_csv(self._results["qc_file"], index=False, header=True, sep="\t", na_rep="n/a") + qc_df.to_csv(self._results['qc_file'], index=False, header=True, sep='\t', na_rep='n/a') - self._results["qc_metadata"] = fname_presuffix( + self._results['qc_metadata'] = fname_presuffix( self.inputs.mean_cbf, - suffix="qc_cbf.json", + suffix='qc_cbf.json', newpath=runtime.cwd, use_ext=False, ) - with open(self._results["qc_metadata"], "w") as fo: + with open(self._results['qc_metadata'], 'w') as fo: json.dump(qc_metadata, fo, indent=4, sort_keys=True) return runtime diff --git a/aslprep/interfaces/parcellation.py b/aslprep/interfaces/parcellation.py index cf8cc2b98..e235c1d7d 100644 --- a/aslprep/interfaces/parcellation.py +++ b/aslprep/interfaces/parcellation.py @@ -15,29 +15,29 @@ ) from nipype.utils.filemanip import fname_presuffix -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class _ParcellateCBFInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="File to be parcellated.") - mask = File(exists=True, mandatory=True, desc="brain mask file") - atlas = File(exists=True, mandatory=True, desc="atlas file") - atlas_labels = File(exists=True, mandatory=True, desc="atlas labels file") + in_file = File(exists=True, mandatory=True, desc='File to be parcellated.') + mask = File(exists=True, mandatory=True, desc='brain mask file') + atlas = File(exists=True, mandatory=True, desc='atlas file') + atlas_labels = File(exists=True, mandatory=True, desc='atlas labels file') min_coverage = traits.Float( default=0.5, usedefault=True, desc=( - "Coverage threshold to apply to parcels. " - "Any parcels with lower coverage than the threshold will be replaced with NaNs. " - "Must be a value between zero and one. " - "Default is 0.5." + 'Coverage threshold to apply to parcels. ' + 'Any parcels with lower coverage than the threshold will be replaced with NaNs. ' + 'Must be a value between zero and one. ' + 'Default is 0.5.' ), ) class _ParcellateCBFOutputSpec(TraitedSpec): - timeseries = File(exists=True, desc="Parcellated time series file.") - coverage = File(exists=True, desc="Parcel-wise coverage file.") + timeseries = File(exists=True, desc='Parcellated time series file.') + coverage = File(exists=True, desc='Parcel-wise coverage file.') class ParcellateCBF(SimpleInterface): @@ -56,14 +56,14 @@ def _run_interface(self, runtime): atlas = self.inputs.atlas min_coverage = self.inputs.min_coverage - node_labels_df = pd.read_table(self.inputs.atlas_labels, index_col="index") + node_labels_df = pd.read_table(self.inputs.atlas_labels, index_col='index') # Fix any nonsequential values or mismatch between atlas and DataFrame. atlas_img, node_labels_df = _sanitize_nifti_atlas(atlas, node_labels_df) - node_labels = node_labels_df["label"].tolist() + node_labels = node_labels_df['label'].tolist() # prepend "background" to node labels to satisfy NiftiLabelsMasker # The background "label" won't be present in the output timeseries. 
- masker_labels = ["background"] + node_labels + masker_labels = ['background'] + node_labels # Before anything, we need to measure coverage atlas_img_bin = nb.Nifti1Image( @@ -79,7 +79,7 @@ def _run_interface(self, runtime): mask_img=mask, smoothing_fwhm=None, standardize=False, - strategy="sum", + strategy='sum', resampling_target=None, # they should be in the same space/resolution already ) sum_masker_unmasked = NiftiLabelsMasker( @@ -88,7 +88,7 @@ def _run_interface(self, runtime): background_label=0, smoothing_fwhm=None, standardize=False, - strategy="sum", + strategy='sum', resampling_target=None, # they should be in the same space/resolution already ) n_voxels_in_masked_parcels = sum_masker_masked.fit_transform(atlas_img_bin) @@ -109,11 +109,11 @@ def _run_interface(self, runtime): if n_found_nodes != n_nodes: LOGGER.warning( - f"{n_nodes - n_found_nodes}/{n_nodes} of parcels not found in atlas file." + f'{n_nodes - n_found_nodes}/{n_nodes} of parcels not found in atlas file.' ) if n_bad_nodes: - LOGGER.warning(f"{n_bad_nodes}/{n_nodes} of parcels have 0% coverage.") + LOGGER.warning(f'{n_bad_nodes}/{n_nodes} of parcels have 0% coverage.') if n_poor_parcels: LOGGER.warning( @@ -149,7 +149,7 @@ def _run_interface(self, runtime): timeseries_arr[:, coverage_thresholded] = np.nan # Region indices in the atlas may not be sequential, so we map them to sequential ints. - seq_mapper = {idx: i for i, idx in enumerate(node_labels_df["sanitized_index"].tolist())} + seq_mapper = {idx: i for i, idx in enumerate(node_labels_df['sanitized_index'].tolist())} if n_found_nodes != n_nodes: # parcels lost by warping/downsampling atlas # Fill in any missing nodes in the timeseries array with NaNs. @@ -175,26 +175,26 @@ def _run_interface(self, runtime): del new_parcel_coverage # The time series file is tab-delimited, with node names included in the first row. - self._results["timeseries"] = fname_presuffix( - "timeseries.tsv", + self._results['timeseries'] = fname_presuffix( + 'timeseries.tsv', newpath=runtime.cwd, use_ext=True, ) timeseries_df = pd.DataFrame(data=timeseries_arr, columns=node_labels) - timeseries_df.to_csv(self._results["timeseries"], sep="\t", na_rep="n/a", index=False) + timeseries_df.to_csv(self._results['timeseries'], sep='\t', na_rep='n/a', index=False) # Save out the coverage tsv coverage_df = pd.DataFrame( data=parcel_coverage.astype(np.float32), index=node_labels, - columns=["coverage"], + columns=['coverage'], ) - self._results["coverage"] = fname_presuffix( - "coverage.tsv", + self._results['coverage'] = fname_presuffix( + 'coverage.tsv', newpath=runtime.cwd, use_ext=True, ) - coverage_df.to_csv(self._results["coverage"], sep="\t", na_rep="n/a", index_label="Node") + coverage_df.to_csv(self._results['coverage'], sep='\t', na_rep='n/a', index_label='Node') return runtime @@ -214,11 +214,11 @@ def _sanitize_nifti_atlas(atlas, df): found_values = np.unique(atlas_data) found_values = found_values[found_values != 0] # drop the background value if not np.all(np.isin(found_values, expected_values)): - raise ValueError("Atlas file contains values that are not present in the DataFrame.") + raise ValueError('Atlas file contains values that are not present in the DataFrame.') # Map the labels in the DataFrame to sequential values. 
label_mapper = {value: i + 1 for i, value in enumerate(expected_values)} - df["sanitized_index"] = [label_mapper[i] for i in df.index.values] + df['sanitized_index'] = [label_mapper[i] for i in df.index.values] # Map the values in the atlas image to sequential values. new_atlas_data = np.zeros(atlas_data.shape, dtype=np.int16) diff --git a/aslprep/interfaces/plotting.py b/aslprep/interfaces/plotting.py index 22c084785..935ea6fce 100644 --- a/aslprep/interfaces/plotting.py +++ b/aslprep/interfaces/plotting.py @@ -18,9 +18,9 @@ class _ASLCarpetPlotInputSpec(BaseInterfaceInputSpec): - in_nifti = File(exists=True, mandatory=True, desc="input BOLD (4D NIfTI file)") - in_cifti = File(exists=True, desc="input BOLD (CIFTI dense timeseries)") - in_segm = File(exists=True, desc="volumetric segmentation corresponding to in_nifti") + in_nifti = File(exists=True, mandatory=True, desc='input BOLD (4D NIfTI file)') + in_cifti = File(exists=True, desc='input BOLD (CIFTI dense timeseries)') + in_segm = File(exists=True, desc='volumetric segmentation corresponding to in_nifti') confounds_file = File(exists=True, desc="BIDS' _confounds.tsv file") str_or_tuple = traits.Either( @@ -31,14 +31,14 @@ class _ASLCarpetPlotInputSpec(BaseInterfaceInputSpec): confounds_list = traits.Either( traits.List(str_or_tuple, minlen=1), None, - desc="list of headers to extract from the confounds_file", + desc='list of headers to extract from the confounds_file', ) - tr = traits.Either(None, traits.Float, usedefault=True, desc="the repetition time") - drop_trs = traits.Int(0, usedefault=True, desc="dummy scans") + tr = traits.Either(None, traits.Float, usedefault=True, desc='the repetition time') + drop_trs = traits.Int(0, usedefault=True, desc='dummy scans') class _ASLCarpetPlotOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="written file path") + out_file = File(exists=True, desc='written file path') class ASLCarpetPlot(SimpleInterface): @@ -51,8 +51,8 @@ class ASLCarpetPlot(SimpleInterface): output_spec = _ASLCarpetPlotOutputSpec def _run_interface(self, runtime): - self._results["out_file"] = fname_presuffix( - self.inputs.in_nifti, suffix="_fmriplot.svg", use_ext=False, newpath=runtime.cwd + self._results['out_file'] = fname_presuffix( + self.inputs.in_nifti, suffix='_fmriplot.svg', use_ext=False, newpath=runtime.cwd ) has_cifti = isdefined(self.inputs.in_cifti) @@ -64,9 +64,9 @@ def _run_interface(self, runtime): nb.load(seg_file), remap_rois=False, labels=( - ("WM+CSF", "Edge") + ('WM+CSF', 'Edge') if has_cifti - else ("Ctx GM", "dGM", "sWM+sCSF", "dWM+dCSF", "Cb", "Edge") + else ('Ctx GM', 'dGM', 'sWM+sCSF', 'dWM+dCSF', 'Cb', 'Edge') ), ) @@ -87,9 +87,9 @@ def _run_interface(self, runtime): dataframe = pd.read_table( self.inputs.confounds_file, index_col=None, - dtype="float32", + dtype='float32', na_filter=True, - na_values="n/a", + na_values='n/a', ) headers = [] @@ -123,20 +123,20 @@ def _run_interface(self, runtime): nskip=self.inputs.drop_trs, paired_carpet=has_cifti, ).plot() - fig.savefig(self._results["out_file"], bbox_inches="tight") + fig.savefig(self._results['out_file'], bbox_inches='tight') fig.clf() return runtime class _CBFSummaryPlotInputSpec(BaseInterfaceInputSpec): - cbf = File(exists=True, mandatory=True, desc="") - label = traits.Str(exists=True, mandatory=True, desc="label") - vmax = traits.Int(exists=True, default_value=90, mandatory=True, desc="max value of asl") - ref_vol = File(exists=True, mandatory=True, desc="") + cbf = File(exists=True, mandatory=True, desc='') + label = 
traits.Str(exists=True, mandatory=True, desc='label') + vmax = traits.Int(exists=True, default_value=90, mandatory=True, desc='max value of asl') + ref_vol = File(exists=True, mandatory=True, desc='') class _CBFSummaryPlotOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="written file path") + out_file = File(exists=True, desc='written file path') class CBFSummaryPlot(SimpleInterface): @@ -149,9 +149,9 @@ class CBFSummaryPlot(SimpleInterface): output_spec = _CBFSummaryPlotOutputSpec def _run_interface(self, runtime): - self._results["out_file"] = fname_presuffix( + self._results['out_file'] = fname_presuffix( self.inputs.cbf, - suffix="_cbfplot.svg", + suffix='_cbfplot.svg', use_ext=False, newpath=runtime.cwd, ) @@ -160,18 +160,18 @@ def _run_interface(self, runtime): label=self.inputs.label, ref_vol=self.inputs.ref_vol, vmax=self.inputs.vmax, - outfile=self._results["out_file"], + outfile=self._results['out_file'], ).plot() return runtime class _CBFByTissueTypePlotInputSpec(BaseInterfaceInputSpec): - cbf = File(exists=True, mandatory=True, desc="") - seg_file = File(exists=True, mandatory=True, desc="Segmentation file") + cbf = File(exists=True, mandatory=True, desc='') + seg_file = File(exists=True, mandatory=True, desc='Segmentation file') class _CBFByTissueTypePlotOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="written file path") + out_file = File(exists=True, desc='written file path') class CBFByTissueTypePlot(SimpleInterface): @@ -185,25 +185,25 @@ def _run_interface(self, runtime): import seaborn as sns from nilearn import image, masking - self._results["out_file"] = fname_presuffix( + self._results['out_file'] = fname_presuffix( self.inputs.cbf, - suffix="_cbfplot.svg", + suffix='_cbfplot.svg', use_ext=False, newpath=runtime.cwd, ) dfs = [] - for i_tissue_type, tissue_type in enumerate(["GM", "WM", "CSF"]): + for i_tissue_type, tissue_type in enumerate(['GM', 'WM', 'CSF']): tissue_type_val = i_tissue_type + 1 mask_img = image.math_img( - f"(img == {tissue_type_val}).astype(int)", + f'(img == {tissue_type_val}).astype(int)', img=self.inputs.seg_file, ) tissue_type_vals = masking.apply_mask(self.inputs.cbf, mask_img) df = pd.DataFrame( - columns=["CBF\n(mL/100 g/min)", "Tissue Type"], + columns=['CBF\n(mL/100 g/min)', 'Tissue Type'], data=list( - map(list, zip(*[tissue_type_vals, [tissue_type] * tissue_type_vals.size])) + map(list, zip(*[tissue_type_vals, [tissue_type] * tissue_type_vals.size], strict=False)) ), ) dfs.append(df) @@ -211,21 +211,21 @@ def _run_interface(self, runtime): df = pd.concat(dfs, axis=0) # Create the plot - with sns.axes_style("whitegrid"), sns.plotting_context(font_scale=3): + with sns.axes_style('whitegrid'), sns.plotting_context(font_scale=3): fig, ax = plt.subplots(figsize=(16, 8)) sns.despine(ax=ax, bottom=True, left=True) sns.boxenplot( - y="CBF\n(mL/100 g/min)", + y='CBF\n(mL/100 g/min)', data=df, width=0.6, showfliers=True, - palette={"GM": "#1b60a5", "WM": "#2da467", "CSF": "#9d8f25"}, - hue="Tissue Type", + palette={'GM': '#1b60a5', 'WM': '#2da467', 'CSF': '#9d8f25'}, + hue='Tissue Type', legend=False, ax=ax, ) fig.tight_layout() - fig.savefig(self._results["out_file"]) + fig.savefig(self._results['out_file']) fig.clf() return runtime diff --git a/aslprep/interfaces/reference.py b/aslprep/interfaces/reference.py index 6d617ee5a..3ffe92524 100644 --- a/aslprep/interfaces/reference.py +++ b/aslprep/interfaces/reference.py @@ -16,8 +16,8 @@ class _SelectHighestContrastVolumesInputSpec(BaseInterfaceInputSpec): - asl_file = 
File(exists=True, mandatory=True, desc="ASL file.") - aslcontext = File(exists=True, mandatory=True, desc="ASL context file.") + asl_file = File(exists=True, mandatory=True, desc='ASL file.') + aslcontext = File(exists=True, mandatory=True, desc='ASL context file.') m0scan = File( exists=True, mandatory=False, @@ -25,12 +25,12 @@ class _SelectHighestContrastVolumesInputSpec(BaseInterfaceInputSpec): ) prioritize_m0 = traits.Bool( mandatory=True, - desc="Whether to prioritize the M0 scan (useful for GE data) or not.", + desc='Whether to prioritize the M0 scan (useful for GE data) or not.', ) class _SelectHighestContrastVolumesOutputSpec(TraitedSpec): - selected_volumes_file = File(desc="File containing the highest-contrast available volumes.") + selected_volumes_file = File(desc='File containing the highest-contrast available volumes.') class SelectHighestContrastVolumes(SimpleInterface): @@ -56,41 +56,41 @@ def _run_interface(self, runtime): aslcontext_df = pd.read_table(self.inputs.aslcontext) assert aslcontext_df.shape[0] == asl_img.shape[3] - if "m0scan" in aslcontext_df["volume_type"].tolist() and self.inputs.prioritize_m0: - target_type = "m0scan" + if 'm0scan' in aslcontext_df['volume_type'].tolist() and self.inputs.prioritize_m0: + target_type = 'm0scan' elif isdefined(self.inputs.m0scan) and self.inputs.prioritize_m0: - target_type = "separate_m0scan" - elif "cbf" in aslcontext_df["volume_type"].tolist(): - target_type = "cbf" - elif "deltam" in aslcontext_df["volume_type"].tolist(): - target_type = "deltam" - elif "m0scan" in aslcontext_df["volume_type"].tolist(): - target_type = "m0scan" + target_type = 'separate_m0scan' + elif 'cbf' in aslcontext_df['volume_type'].tolist(): + target_type = 'cbf' + elif 'deltam' in aslcontext_df['volume_type'].tolist(): + target_type = 'deltam' + elif 'm0scan' in aslcontext_df['volume_type'].tolist(): + target_type = 'm0scan' elif isdefined(self.inputs.m0scan): - target_type = "separate_m0scan" + target_type = 'separate_m0scan' else: - target_type = "control" + target_type = 'control' config.loggers.interface.info( - f"Selecting {target_type} as highest-contrast volume type for reference volume " - "generation." + f'Selecting {target_type} as highest-contrast volume type for reference volume ' + 'generation.' ) - if target_type == "separate_m0scan": - self._results["selected_volumes_file"] = self.inputs.m0scan + if target_type == 'separate_m0scan': + self._results['selected_volumes_file'] = self.inputs.m0scan return runtime # Otherwise, split up the ASL file based on the volume type with the highest contrast. 
- target_idx = aslcontext_df.loc[aslcontext_df["volume_type"] == target_type].index.values + target_idx = aslcontext_df.loc[aslcontext_df['volume_type'] == target_type].index.values if target_idx.size == 0: raise ValueError(f"Volume type '{target_type}' missing from {self.inputs.aslcontext}") asl_data = asl_img.get_fdata() highest_contrast_data = asl_data[:, :, :, target_idx] - self._results["selected_volumes_file"] = fname_presuffix( + self._results['selected_volumes_file'] = fname_presuffix( self.inputs.asl_file, - suffix="_contrast.nii.gz", + suffix='_contrast.nii.gz', newpath=runtime.cwd, use_ext=False, ) @@ -99,6 +99,6 @@ def _run_interface(self, runtime): dataobj=highest_contrast_data, affine=asl_img.affine, header=asl_img.header, - ).to_filename(self._results["selected_volumes_file"]) + ).to_filename(self._results['selected_volumes_file']) return runtime diff --git a/aslprep/interfaces/reports.py b/aslprep/interfaces/reports.py index bcec8136c..2c1762dbb 100644 --- a/aslprep/interfaces/reports.py +++ b/aslprep/interfaces/reports.py @@ -64,7 +64,7 @@ class _SummaryOutputSpec(TraitedSpec): - out_report = File(exists=True, desc="HTML segment containing summary") + out_report = File(exists=True, desc='HTML segment containing summary') class SummaryInterface(SimpleInterface): @@ -74,10 +74,10 @@ class SummaryInterface(SimpleInterface): def _run_interface(self, runtime): segment = self._generate_segment() - fname = os.path.join(runtime.cwd, "report.html") - with open(fname, "w") as fobj: + fname = os.path.join(runtime.cwd, 'report.html') + with open(fname, 'w') as fobj: fobj.write(segment) - self._results["out_report"] = fname + self._results['out_report'] = fname return runtime def _generate_segment(self): @@ -85,22 +85,22 @@ def _generate_segment(self): class _SubjectSummaryInputSpec(BaseInterfaceInputSpec): - t1w = InputMultiObject(File(exists=True), desc="T1w structural images") - t2w = InputMultiObject(File(exists=True), desc="T2w structural images") - subjects_dir = Directory(desc="FreeSurfer subjects directory") - subject_id = Str(desc="Subject ID") + t1w = InputMultiObject(File(exists=True), desc='T1w structural images') + t2w = InputMultiObject(File(exists=True), desc='T2w structural images') + subjects_dir = Directory(desc='FreeSurfer subjects directory') + subject_id = Str(desc='Subject ID') asl = InputMultiObject( traits.Either(File(exists=True), traits.List(File(exists=True))), - desc="ASL functional series", + desc='ASL functional series', ) - std_spaces = traits.List(Str, desc="list of standard spaces") - nstd_spaces = traits.List(Str, desc="list of non-standard spaces") + std_spaces = traits.List(Str, desc='list of standard spaces') + nstd_spaces = traits.List(Str, desc='list of non-standard spaces') class _SubjectSummaryOutputSpec(_SummaryOutputSpec): # This exists to ensure that the summary is run prior to the first ReconAll # call, allowing a determination whether there is a pre-existing directory - subject_id = Str(desc="FreeSurfer subject ID") + subject_id = Str(desc='FreeSurfer subject ID') class SubjectSummary(SummaryInterface): @@ -111,27 +111,27 @@ class SubjectSummary(SummaryInterface): def _run_interface(self, runtime): if isdefined(self.inputs.subject_id): - self._results["subject_id"] = self.inputs.subject_id + self._results['subject_id'] = self.inputs.subject_id return super(SubjectSummary, self)._run_interface(runtime) def _generate_segment(self): if not isdefined(self.inputs.subjects_dir): - freesurfer_status = "Not run" + freesurfer_status = 'Not run' 
else: recon = ReconAll( subjects_dir=self.inputs.subjects_dir, - subject_id="sub-" + self.inputs.subject_id, + subject_id='sub-' + self.inputs.subject_id, T1_files=self.inputs.t1w, - flags="-noskullstrip", + flags='-noskullstrip', ) - if recon.cmdline.startswith("echo"): - freesurfer_status = "Pre-existing directory" + if recon.cmdline.startswith('echo'): + freesurfer_status = 'Pre-existing directory' else: - freesurfer_status = "Run by ASLPrep" + freesurfer_status = 'Run by ASLPrep' - t2w_seg = "" + t2w_seg = '' if self.inputs.t2w: - t2w_seg = f"(+ {len(self.inputs.t2w):d} T2-weighted)" + t2w_seg = f'(+ {len(self.inputs.t2w):d} T2-weighted)' # Add list of tasks with number of runs asl_series = self.inputs.asl if isdefined(self.inputs.asl) else [] @@ -142,51 +142,51 @@ def _generate_segment(self): n_t1s=len(self.inputs.t1w), t2w=t2w_seg, n_asl=len(asl_series), - std_spaces=", ".join(self.inputs.std_spaces), - nstd_spaces=", ".join(self.inputs.nstd_spaces), + std_spaces=', '.join(self.inputs.std_spaces), + nstd_spaces=', '.join(self.inputs.nstd_spaces), freesurfer_status=freesurfer_status, ) class _FunctionalSummaryInputSpec(BaseInterfaceInputSpec): distortion_correction = traits.Str( - desc="Susceptibility distortion correction method", + desc='Susceptibility distortion correction method', mandatory=True, ) pe_direction = traits.Enum( None, - "i", - "i-", - "j", - "j-", + 'i', + 'i-', + 'j', + 'j-', mandatory=True, - desc="Phase-encoding direction detected", + desc='Phase-encoding direction detected', ) registration = traits.Enum( - "FSL", - "FreeSurfer", + 'FSL', + 'FreeSurfer', mandatory=True, - desc="Functional/anatomical registration method", + desc='Functional/anatomical registration method', ) - fallback = traits.Bool(desc="Boundary-based registration rejected") + fallback = traits.Bool(desc='Boundary-based registration rejected') registration_dof = traits.Enum( 6, 9, 12, - desc="Registration degrees of freedom", + desc='Registration degrees of freedom', mandatory=True, ) registration_init = traits.Enum( - "register", - "header", + 'register', + 'header', mandatory=True, desc='Whether to initialize registration with the "header"' ' or by centering the volumes ("register")', ) - confounds_file = File(exists=True, mandatory=False, desc="Confounds file") - qc_file = File(exists=True, desc="qc file") - tr = traits.Float(desc="Repetition time", mandatory=True) - orientation = traits.Str(mandatory=True, desc="Orientation of the voxel axes") + confounds_file = File(exists=True, mandatory=False, desc='Confounds file') + qc_file = File(exists=True, desc='qc file') + tr = traits.Float(desc='Repetition time', mandatory=True) + orientation = traits.Str(mandatory=True, desc='Orientation of the voxel axes') class FunctionalSummary(SummaryInterface): @@ -197,24 +197,24 @@ class FunctionalSummary(SummaryInterface): def _generate_segment(self): dof = self.inputs.registration_dof reg = { - "FSL": [ - "FSL flirt with boundary-based registration" - f" (BBR) metric - {dof} dof", - "FSL flirt rigid registration - 6 dof", + 'FSL': [ + 'FSL flirt with boundary-based registration' + f' (BBR) metric - {dof} dof', + 'FSL flirt rigid registration - 6 dof', ], - "FreeSurfer": [ - "FreeSurfer bbregister " - f"(boundary-based registration, BBR) - {dof} dof", - f"FreeSurfer mri_coreg - {dof} dof", + 'FreeSurfer': [ + 'FreeSurfer bbregister ' + f'(boundary-based registration, BBR) - {dof} dof', + f'FreeSurfer mri_coreg - {dof} dof', ], }[self.inputs.registration][self.inputs.fallback] pedir = 
get_world_pedir(self.inputs.orientation, self.inputs.pe_direction) if self.inputs.pe_direction is None: - pedir = "MISSING - Assuming Anterior-Posterior" + pedir = 'MISSING - Assuming Anterior-Posterior' else: - pedir = {"i": "Left-Right", "j": "Anterior-Posterior"}[self.inputs.pe_direction[0]] + pedir = {'i': 'Left-Right', 'j': 'Anterior-Posterior'}[self.inputs.pe_direction[0]] # the number of dummy scans was specified by the user and # it is not equal to the number detected by the algorithm @@ -230,8 +230,8 @@ def _generate_segment(self): class _CBFSummaryInputSpec(BaseInterfaceInputSpec): - confounds_file = File(exists=True, mandatory=False, desc="Confounds file") - qc_file = File(exists=True, desc="qc file") + confounds_file = File(exists=True, mandatory=False, desc='Confounds file') + qc_file = File(exists=True, desc='qc file') class CBFSummary(SummaryInterface): @@ -276,16 +276,16 @@ def _generate_segment(self): if isdefined(self.inputs.confounds_file): with open(self.inputs.confounds_file) as cfh: - conflist = cfh.readline().strip("\n").strip() + conflist = cfh.readline().strip('\n').strip() else: - conflist = "None" + conflist = 'None' # the number of dummy scans was specified by the user and # it is not equal to the number detected by the algorithm # the number of dummy scans was not specified by the user return QC_TEMPLATE.format( - confounds=re.sub(r"[\t ]+", ", ", conflist), + confounds=re.sub(r'[\t ]+', ', ', conflist), motionparam=motionparam, qei=qei, coregindex=coregindex, @@ -296,8 +296,8 @@ def _generate_segment(self): class _AboutSummaryInputSpec(BaseInterfaceInputSpec): - version = Str(desc="ASLPREP version") - command = Str(desc="ASLPREP command") + version = Str(desc='ASLPREP version') + command = Str(desc='ASLPREP command') # Date not included - update timestamp only if version or command changes @@ -310,5 +310,5 @@ def _generate_segment(self): return ABOUT_TEMPLATE.format( version=self.inputs.version, command=self.inputs.command, - date=time.strftime("%Y-%m-%d %H:%M:%S %z"), + date=time.strftime('%Y-%m-%d %H:%M:%S %z'), ) diff --git a/aslprep/interfaces/utility.py b/aslprep/interfaces/utility.py index a7f2c0499..8e28a56ce 100644 --- a/aslprep/interfaces/utility.py +++ b/aslprep/interfaces/utility.py @@ -20,15 +20,15 @@ class _ReduceASLFilesInputSpec(BaseInterfaceInputSpec): - asl_file = File(exists=True, mandatory=True, desc="ASL file to split.") - aslcontext = File(exists=True, mandatory=True, desc="aslcontext TSV.") + asl_file = File(exists=True, mandatory=True, desc='ASL file to split.') + aslcontext = File(exists=True, mandatory=True, desc='aslcontext TSV.') processing_target = traits.Str() metadata = traits.Dict() class _ReduceASLFilesOutputSpec(TraitedSpec): - asl_file = File(exists=True, desc="Modified ASL file.") - aslcontext = File(exists=True, desc="Modified aslcontext file.") + asl_file = File(exists=True, desc='Modified ASL file.') + aslcontext = File(exists=True, desc='Modified aslcontext file.') metadata = traits.Dict() @@ -47,17 +47,17 @@ def _run_interface(self, runtime): f"number of rows in {self.inputs.aslcontext} ({aslcontext.shape[0]})." 
) - if self.inputs.processing_target == "control": - files_to_keep = ["control", "label", "m0scan"] - elif self.inputs.processing_target == "deltam": - files_to_keep = ["deltam", "m0scan"] + if self.inputs.processing_target == 'control': + files_to_keep = ['control', 'label', 'm0scan'] + elif self.inputs.processing_target == 'deltam': + files_to_keep = ['deltam', 'm0scan'] else: - files_to_keep = ["cbf", "m0scan"] + files_to_keep = ['cbf', 'm0scan'] n_volumes = aslcontext.shape[0] - asl_idx = aslcontext.loc[aslcontext["volume_type"].isin(files_to_keep)].index.values + asl_idx = aslcontext.loc[aslcontext['volume_type'].isin(files_to_keep)].index.values asl_idx = asl_idx.astype(int) - self._results["metadata"] = reduce_metadata_lists( + self._results['metadata'] = reduce_metadata_lists( metadata=self.inputs.metadata, n_volumes=n_volumes, keep_idx=asl_idx, @@ -65,22 +65,22 @@ def _run_interface(self, runtime): asl_img = image.index_img(asl_img, asl_idx) - self._results["asl_file"] = fname_presuffix( + self._results['asl_file'] = fname_presuffix( self.inputs.asl_file, - suffix="_reduced", + suffix='_reduced', newpath=runtime.cwd, use_ext=True, ) - asl_img.to_filename(self._results["asl_file"]) + asl_img.to_filename(self._results['asl_file']) aslcontext = aslcontext.loc[asl_idx] - self._results["aslcontext"] = fname_presuffix( + self._results['aslcontext'] = fname_presuffix( self.inputs.aslcontext, - suffix="_reduced", + suffix='_reduced', newpath=runtime.cwd, use_ext=True, ) - aslcontext.to_csv(self._results["aslcontext"], sep="\t", index=False) + aslcontext.to_csv(self._results['aslcontext'], sep='\t', index=False) return runtime @@ -89,22 +89,22 @@ class _RMSDiffInputSpec(FSLCommandInputSpec): matrixfile1 = File( exists=True, position=0, - argstr="%s", - desc="First matrix file.", + argstr='%s', + desc='First matrix file.', mandatory=True, ) matrixfile2 = File( exists=True, position=1, - argstr="%s", - desc="Second matrix file.", + argstr='%s', + desc='Second matrix file.', mandatory=True, ) ref_vol = File( exists=True, position=2, - argstr="%s", - desc="Reference volume.", + argstr='%s', + desc='Reference volume.', mandatory=True, ) @@ -116,7 +116,7 @@ class _RMSDiffOutputSpec(TraitedSpec): class RMSDiff(FSLCommand): """Run rmsdiff.""" - _cmd = "rmsdiff" + _cmd = 'rmsdiff' input_spec = _RMSDiffInputSpec output_spec = _RMSDiffOutputSpec @@ -124,16 +124,16 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): # noqa: U100 """Taken from nipype.interfaces.afni.preprocess.ClipLevel.""" outputs = self._outputs() - outfile = os.path.join(os.getcwd(), "stat_result.json") + outfile = os.path.join(os.getcwd(), 'stat_result.json') if runtime is None: try: - rmsd = load_json(outfile)["stat"] - except IOError: + rmsd = load_json(outfile)['stat'] + except OSError: return self.run().outputs else: rmsd = [] - for line in runtime.stdout.split("\n"): + for line in runtime.stdout.split('\n'): if line: values = line.split() if len(values) > 1: @@ -154,18 +154,18 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): # noqa: U100 class _PairwiseRMSDiffInputSpec(BaseInterfaceInputSpec): in_files = traits.List( File(exists=True), - desc="Matrix files to compare with each other.", + desc='Matrix files to compare with each other.', mandatory=True, ) ref_file = File( exists=True, - desc="Reference volume.", + desc='Reference volume.', mandatory=True, ) class _PairwiseRMSDiffOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Output txt file.") + out_file = File(exists=True, 
desc='Output txt file.') class PairwiseRMSDiff(SimpleInterface): @@ -190,14 +190,14 @@ def _run_interface(self, runtime): assert isinstance(res.outputs.rmsd, float) rmsd.append(str(res.outputs.rmsd)) - self._results["out_file"] = fname_presuffix( + self._results['out_file'] = fname_presuffix( self.inputs.ref_file, - suffix="_rmsd.txt", + suffix='_rmsd.txt', newpath=runtime.cwd, use_ext=False, ) - with open(self._results["out_file"], "w") as fo: - fo.write("\n".join(rmsd)) + with open(self._results['out_file'], 'w') as fo: + fo.write('\n'.join(rmsd)) return runtime @@ -232,25 +232,25 @@ def _run_interface(self, runtime): type_mat_files = self.inputs.mat_files[i_type] type_par_file = self.inputs.par_files[i_type] - type_idx = aslcontext.loc[aslcontext["volume_type"] == volume_type].index.values + type_idx = aslcontext.loc[aslcontext['volume_type'] == volume_type].index.values - with open(type_par_file, "r") as fo: + with open(type_par_file) as fo: par = fo.readlines() for i_vol, vol_idx in enumerate(type_idx): out_par[vol_idx] = par[i_vol] out_mat_files[vol_idx] = type_mat_files[i_vol] - self._results["combined_par_file"] = fname_presuffix( + self._results['combined_par_file'] = fname_presuffix( type_par_file, - suffix="_combined", + suffix='_combined', newpath=runtime.cwd, use_ext=True, ) - with open(self._results["combined_par_file"], "w") as fo: - fo.write("".join(out_par)) + with open(self._results['combined_par_file'], 'w') as fo: + fo.write(''.join(out_par)) - self._results["mat_file_list"] = out_mat_files + self._results['mat_file_list'] = out_mat_files return runtime @@ -273,24 +273,24 @@ class SplitByVolumeType(SimpleInterface): def _run_interface(self, runtime): aslcontext = pd.read_table(self.inputs.aslcontext) - volume_types = sorted(list(aslcontext["volume_type"].unique())) + volume_types = sorted(list(aslcontext['volume_type'].unique())) out_files = [] for volume_type in volume_types: - volumetype_df = aslcontext.loc[aslcontext["volume_type"] == volume_type] + volumetype_df = aslcontext.loc[aslcontext['volume_type'] == volume_type] volumetype_idx = volumetype_df.index.tolist() out_img = image.index_img(self.inputs.asl_file, volumetype_idx) out_file = fname_presuffix( self.inputs.asl_file, - suffix=f"_{volume_type}", + suffix=f'_{volume_type}', newpath=runtime.cwd, use_ext=True, ) out_img.to_filename(out_file) out_files.append(out_file) - self._results["out_files"] = out_files - self._results["volume_types"] = volume_types + self._results['out_files'] = out_files + self._results['volume_types'] = volume_types return runtime @@ -299,7 +299,7 @@ class _SmoothInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="An image to smooth.", + desc='An image to smooth.', ) fwhm = traits.Either( traits.Float(), @@ -309,22 +309,22 @@ class _SmoothInputSpec(BaseInterfaceInputSpec): maxlen=3, ), desc=( - "Full width at half maximum. " - "Smoothing strength, as a full-width at half maximum, in millimeters." + 'Full width at half maximum. ' + 'Smoothing strength, as a full-width at half maximum, in millimeters.' ), ) out_file = File( - "smooth_img.nii.gz", + 'smooth_img.nii.gz', usedefault=True, exists=False, - desc="The name of the smoothed file to write out. smooth_img.nii.gz by default.", + desc='The name of the smoothed file to write out. 
smooth_img.nii.gz by default.', ) class _SmoothOutputSpec(TraitedSpec): out_file = File( exists=True, - desc="Smoothed output file.", + desc='Smoothed output file.', ) @@ -338,12 +338,12 @@ def _run_interface(self, runtime): from nilearn.image import smooth_img img_smoothed = smooth_img(self.inputs.in_file, fwhm=self.inputs.fwhm) - self._results["out_file"] = fname_presuffix( + self._results['out_file'] = fname_presuffix( self.inputs.in_file, - suffix="_sm.nii.gz", + suffix='_sm.nii.gz', newpath=runtime.cwd, use_ext=False, ) - img_smoothed.to_filename(self._results["out_file"]) + img_smoothed.to_filename(self._results['out_file']) return runtime diff --git a/aslprep/tests/conftest.py b/aslprep/tests/conftest.py index c77b1a174..03f5a2975 100644 --- a/aslprep/tests/conftest.py +++ b/aslprep/tests/conftest.py @@ -8,55 +8,55 @@ def pytest_addoption(parser): """Collect pytest parameters for running tests.""" - parser.addoption("--working_dir", action="store", default="/tmp") - parser.addoption("--data_dir", action="store") - parser.addoption("--output_dir", action="store") + parser.addoption('--working_dir', action='store', default='/tmp') + parser.addoption('--data_dir', action='store') + parser.addoption('--output_dir', action='store') # Set up the commandline options as fixtures -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def data_dir(request): """Grab data directory.""" - return request.config.getoption("--data_dir") + return request.config.getoption('--data_dir') -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def working_dir(request): """Grab working directory.""" - workdir = request.config.getoption("--working_dir") + workdir = request.config.getoption('--working_dir') os.makedirs(workdir, exist_ok=True) return workdir -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def output_dir(request): """Grab output directory.""" - outdir = request.config.getoption("--output_dir") + outdir = request.config.getoption('--output_dir') os.makedirs(outdir, exist_ok=True) return outdir -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def datasets(data_dir): """Locate downloaded datasets.""" return { - "examples_pasl_multipld": os.path.join(data_dir, "examples_pasl_multipld"), - "examples_pcasl_multipld": os.path.join(data_dir, "examples_pcasl_multipld"), - "examples_pcasl_singlepld": os.path.join(data_dir, "examples_pcasl_singlepld"), - "qtab": os.path.join(data_dir, "qtab"), - "test_001": os.path.join(data_dir, "test_001"), - "test_002": os.path.join(data_dir, "test_002"), - "test_003": os.path.join(data_dir, "test_003"), + 'examples_pasl_multipld': os.path.join(data_dir, 'examples_pasl_multipld'), + 'examples_pcasl_multipld': os.path.join(data_dir, 'examples_pcasl_multipld'), + 'examples_pcasl_singlepld': os.path.join(data_dir, 'examples_pcasl_singlepld'), + 'qtab': os.path.join(data_dir, 'qtab'), + 'test_001': os.path.join(data_dir, 'test_001'), + 'test_002': os.path.join(data_dir, 'test_002'), + 'test_003': os.path.join(data_dir, 'test_003'), } -@pytest.fixture(scope="session", autouse=True) +@pytest.fixture(scope='session', autouse=True) def fslicense(working_dir): """Set the FreeSurfer license as an environment variable.""" - FS_LICENSE = os.path.join(working_dir, "license.txt") - os.environ["FS_LICENSE"] = FS_LICENSE + FS_LICENSE = os.path.join(working_dir, 'license.txt') + os.environ['FS_LICENSE'] = FS_LICENSE LICENSE_CODE = ( - 
"bWF0dGhldy5jaWVzbGFrQHBzeWNoLnVjc2IuZWR1CjIwNzA2CipDZmVWZEg1VVQ4clkKRlNCWVouVWtlVElDdwo=" + 'bWF0dGhldy5jaWVzbGFrQHBzeWNoLnVjc2IuZWR1CjIwNzA2CipDZmVWZEg1VVQ4clkKRlNCWVouVWtlVElDdwo=' ) - with open(FS_LICENSE, "w") as f: + with open(FS_LICENSE, 'w') as f: f.write(base64.b64decode(LICENSE_CODE).decode()) diff --git a/aslprep/tests/run_local_tests.py b/aslprep/tests/run_local_tests.py index 3636df5e7..e2bc0e84e 100644 --- a/aslprep/tests/run_local_tests.py +++ b/aslprep/tests/run_local_tests.py @@ -14,20 +14,20 @@ def _get_parser(): """ parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( - "-k", - dest="test_regex", - metavar="PATTERN", + '-k', + dest='test_regex', + metavar='PATTERN', type=str, - help="Test pattern.", + help='Test pattern.', required=False, default=None, ) parser.add_argument( - "-m", - dest="test_mark", - metavar="LABEL", + '-m', + dest='test_mark', + metavar='LABEL', type=str, - help="Test mark label.", + help='Test mark label.', required=False, default=None, ) @@ -52,35 +52,35 @@ def run_command(command, env=None): ) while True: line = process.stdout.readline() - line = str(line, "utf-8")[:-1] + line = str(line, 'utf-8')[:-1] print(line) - if line == "" and process.poll() is not None: + if line == '' and process.poll() is not None: break if process.returncode != 0: raise RuntimeError( - f"Non zero return code: {process.returncode}\n" f"{command}\n\n{process.stdout.read()}" + f'Non zero return code: {process.returncode}\n' f'{command}\n\n{process.stdout.read()}' ) def run_tests(test_regex, test_mark): """Run the tests.""" local_patch = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - mounted_code = "/usr/local/miniconda/lib/python3.9/site-packages/aslprep" - run_str = "docker run --rm -ti " - run_str += f"-v {local_patch}:/usr/local/miniconda/lib/python3.9/site-packages/aslprep " - run_str += "--entrypoint pytest " - run_str += "pennlinc/aslprep:unstable " + mounted_code = '/usr/local/miniconda/lib/python3.9/site-packages/aslprep' + run_str = 'docker run --rm -ti ' + run_str += f'-v {local_patch}:/usr/local/miniconda/lib/python3.9/site-packages/aslprep ' + run_str += '--entrypoint pytest ' + run_str += 'pennlinc/aslprep:unstable ' run_str += ( - f"{mounted_code}/aslprep " - f"--data_dir={mounted_code}/aslprep/tests/test_data " - f"--output_dir={mounted_code}/aslprep/tests/pytests/out " - f"--working_dir={mounted_code}/aslprep/tests/pytests/work " + f'{mounted_code}/aslprep ' + f'--data_dir={mounted_code}/aslprep/tests/test_data ' + f'--output_dir={mounted_code}/aslprep/tests/pytests/out ' + f'--working_dir={mounted_code}/aslprep/tests/pytests/work ' ) if test_regex: - run_str += f"-k {test_regex} " + run_str += f'-k {test_regex} ' elif test_mark: - run_str += f"-rP -o log_cli=true -m {test_mark} " + run_str += f'-rP -o log_cli=true -m {test_mark} ' run_command(run_str) @@ -92,5 +92,5 @@ def _main(argv=None): run_tests(**kwargs) -if __name__ == "__main__": +if __name__ == '__main__': _main() diff --git a/aslprep/tests/test_cli.py b/aslprep/tests/test_cli.py index 9755f4a98..169826c6e 100644 --- a/aslprep/tests/test_cli.py +++ b/aslprep/tests/test_cli.py @@ -18,7 +18,7 @@ nipype_config.enable_debug_mode() -@pytest.mark.examples_pasl_multipld +@pytest.mark.examples_pasl_multipld() def test_examples_pasl_multipld(data_dir, output_dir, working_dir): """Run aslprep on the asl_003 ASL-BIDS examples dataset. 
@@ -28,334 +28,334 @@ def test_examples_pasl_multipld(data_dir, output_dir, working_dir): PASL multi-delay data is not yet supported. """ - TEST_NAME = "examples_pasl_multipld" - PARTICIPANT_LABEL = "01" + TEST_NAME = 'examples_pasl_multipld' + PARTICIPANT_LABEL = '01' dataset_dir = download_test_data(TEST_NAME, data_dir) - download_test_data("anatomical", data_dir) - out_dir = os.path.join(output_dir, TEST_NAME, "aslprep") + download_test_data('anatomical', data_dir) + out_dir = os.path.join(output_dir, TEST_NAME, 'aslprep') work_dir = os.path.join(working_dir, TEST_NAME) parameters = [ dataset_dir, out_dir, - "participant", - f"--participant-label={PARTICIPANT_LABEL}", - f"-w={work_dir}", - "--nthreads=1", - "--omp-nthreads=1", - "--output-spaces=asl", - "--scorescrub", - "--basil", - "--use-syn-sdc", - "--m0_scale=10", - "--fs-no-reconall", + 'participant', + f'--participant-label={PARTICIPANT_LABEL}', + f'-w={work_dir}', + '--nthreads=1', + '--omp-nthreads=1', + '--output-spaces=asl', + '--scorescrub', + '--basil', + '--use-syn-sdc', + '--m0_scale=10', + '--fs-no-reconall', f"--fs-subjects-dir={os.path.join(data_dir, 'anatomical/freesurfer')}", - "--derivatives", + '--derivatives', f"{os.path.join(data_dir, 'anatomical/smriprep')}", ] _run_and_fail(parameters) -@pytest.mark.examples_pcasl_multipld +@pytest.mark.examples_pcasl_multipld() def test_examples_pcasl_multipld(data_dir, output_dir, working_dir): """Run aslprep on the asl_004 ASL-BIDS examples dataset. This dataset has 48 control-label pairs at 6 different PLDs, along with a separate M0 scan. The manufacturer is Siemens. """ - TEST_NAME = "examples_pcasl_multipld" - PARTICIPANT_LABEL = "01" + TEST_NAME = 'examples_pcasl_multipld' + PARTICIPANT_LABEL = '01' dataset_dir = download_test_data(TEST_NAME, data_dir) - download_test_data("anatomical", data_dir) - out_dir = os.path.join(output_dir, TEST_NAME, "aslprep") + download_test_data('anatomical', data_dir) + out_dir = os.path.join(output_dir, TEST_NAME, 'aslprep') work_dir = os.path.join(working_dir, TEST_NAME) parameters = [ dataset_dir, out_dir, - "participant", - f"--participant-label={PARTICIPANT_LABEL}", - f"-w={work_dir}", - "--nthreads=1", - "--omp-nthreads=1", - "--output-spaces=asl", - "--scorescrub", - "--basil", - "--m0_scale=10", - "--fs-no-reconall", + 'participant', + f'--participant-label={PARTICIPANT_LABEL}', + f'-w={work_dir}', + '--nthreads=1', + '--omp-nthreads=1', + '--output-spaces=asl', + '--scorescrub', + '--basil', + '--m0_scale=10', + '--fs-no-reconall', f"--fs-subjects-dir={os.path.join(data_dir, 'anatomical/freesurfer')}", - "--derivatives", + '--derivatives', f"{os.path.join(data_dir, 'anatomical/smriprep')}", ] _run_and_generate(TEST_NAME, PARTICIPANT_LABEL, parameters, out_dir) -@pytest.mark.examples_pcasl_singlepld_ge +@pytest.mark.examples_pcasl_singlepld_ge() def test_examples_pcasl_singlepld_ge(data_dir, output_dir, working_dir): """Run aslprep on the asl_001 ASL-BIDS examples dataset. This test uses a GE session with two volumes: one deltam and one M0. 
""" - TEST_NAME = "examples_pcasl_singlepld_ge" - PARTICIPANT_LABEL = "01" + TEST_NAME = 'examples_pcasl_singlepld_ge' + PARTICIPANT_LABEL = '01' - dataset_dir = download_test_data("examples_pcasl_singlepld", data_dir) - download_test_data("anatomical", data_dir) - out_dir = os.path.join(output_dir, TEST_NAME, "aslprep") + dataset_dir = download_test_data('examples_pcasl_singlepld', data_dir) + download_test_data('anatomical', data_dir) + out_dir = os.path.join(output_dir, TEST_NAME, 'aslprep') work_dir = os.path.join(working_dir, TEST_NAME) test_data_dir = get_test_data_path() - filter_file = os.path.join(test_data_dir, f"{TEST_NAME}_filter.json") + filter_file = os.path.join(test_data_dir, f'{TEST_NAME}_filter.json') parameters = [ dataset_dir, out_dir, - "participant", - f"--participant-label={PARTICIPANT_LABEL}", - f"-w={work_dir}", - f"--bids-filter-file={filter_file}", - "--nthreads=1", - "--omp-nthreads=1", - "--output-spaces=asl", - "--scorescrub", - "--basil", - "--m0_scale=96", - "--fs-no-reconall", + 'participant', + f'--participant-label={PARTICIPANT_LABEL}', + f'-w={work_dir}', + f'--bids-filter-file={filter_file}', + '--nthreads=1', + '--omp-nthreads=1', + '--output-spaces=asl', + '--scorescrub', + '--basil', + '--m0_scale=96', + '--fs-no-reconall', f"--fs-subjects-dir={os.path.join(data_dir, 'anatomical/freesurfer')}", - "--derivatives", + '--derivatives', f"{os.path.join(data_dir, 'anatomical/smriprep')}", ] _run_and_generate(TEST_NAME, PARTICIPANT_LABEL, parameters, out_dir) -@pytest.mark.examples_pcasl_singlepld_philips +@pytest.mark.examples_pcasl_singlepld_philips() def test_examples_pcasl_singlepld_philips(data_dir, output_dir, working_dir): """Run aslprep on the asl_002 ASL-BIDS examples dataset. This test uses a Philips session. The appropriate M0 scale is unknown for this dataset, so CBF values will be inflated. 
""" - TEST_NAME = "examples_pcasl_singlepld_philips" - PARTICIPANT_LABEL = "01" + TEST_NAME = 'examples_pcasl_singlepld_philips' + PARTICIPANT_LABEL = '01' - dataset_dir = download_test_data("examples_pcasl_singlepld", data_dir) - download_test_data("anatomical", data_dir) - out_dir = os.path.join(output_dir, TEST_NAME, "aslprep") + dataset_dir = download_test_data('examples_pcasl_singlepld', data_dir) + download_test_data('anatomical', data_dir) + out_dir = os.path.join(output_dir, TEST_NAME, 'aslprep') work_dir = os.path.join(working_dir, TEST_NAME) test_data_dir = get_test_data_path() - filter_file = os.path.join(test_data_dir, f"{TEST_NAME}_filter.json") + filter_file = os.path.join(test_data_dir, f'{TEST_NAME}_filter.json') parameters = [ dataset_dir, out_dir, - "participant", - f"--participant-label={PARTICIPANT_LABEL}", - f"-w={work_dir}", - f"--bids-filter-file={filter_file}", - "--nthreads=1", - "--omp-nthreads=1", - "--output-spaces", - "asl", - "fsaverage:den-10k", - "--scorescrub", - "--basil", + 'participant', + f'--participant-label={PARTICIPANT_LABEL}', + f'-w={work_dir}', + f'--bids-filter-file={filter_file}', + '--nthreads=1', + '--omp-nthreads=1', + '--output-spaces', + 'asl', + 'fsaverage:den-10k', + '--scorescrub', + '--basil', f"--fs-subjects-dir={os.path.join(data_dir, 'anatomical/freesurfer')}", - "--derivatives", + '--derivatives', f"{os.path.join(data_dir, 'anatomical/smriprep')}", ] _run_and_generate(TEST_NAME, PARTICIPANT_LABEL, parameters, out_dir) -@pytest.mark.examples_pcasl_singlepld_siemens +@pytest.mark.examples_pcasl_singlepld_siemens() def test_examples_pcasl_singlepld_siemens(data_dir, output_dir, working_dir): """Run aslprep on the asl_005 ASL-BIDS examples dataset. This test uses a Siemens session. """ - TEST_NAME = "examples_pcasl_singlepld_siemens" - PARTICIPANT_LABEL = "01" + TEST_NAME = 'examples_pcasl_singlepld_siemens' + PARTICIPANT_LABEL = '01' - dataset_dir = download_test_data("examples_pcasl_singlepld", data_dir) - download_test_data("anatomical", data_dir) - out_dir = os.path.join(output_dir, TEST_NAME, "aslprep") + dataset_dir = download_test_data('examples_pcasl_singlepld', data_dir) + download_test_data('anatomical', data_dir) + out_dir = os.path.join(output_dir, TEST_NAME, 'aslprep') work_dir = os.path.join(working_dir, TEST_NAME) test_data_dir = get_test_data_path() - filter_file = os.path.join(test_data_dir, f"{TEST_NAME}_filter.json") + filter_file = os.path.join(test_data_dir, f'{TEST_NAME}_filter.json') parameters = [ dataset_dir, out_dir, - "participant", - f"--participant-label={PARTICIPANT_LABEL}", - f"-w={work_dir}", - f"--bids-filter-file={filter_file}", - "--nthreads=1", - "--omp-nthreads=1", - "--output-spaces", - "MNI152NLin2009cAsym", - "--basil", - "--m0_scale=10", - "--fs-no-reconall", + 'participant', + f'--participant-label={PARTICIPANT_LABEL}', + f'-w={work_dir}', + f'--bids-filter-file={filter_file}', + '--nthreads=1', + '--omp-nthreads=1', + '--output-spaces', + 'MNI152NLin2009cAsym', + '--basil', + '--m0_scale=10', + '--fs-no-reconall', f"--fs-subjects-dir={os.path.join(data_dir, 'anatomical/freesurfer')}", - "--derivatives", + '--derivatives', f"{os.path.join(data_dir, 'anatomical/smriprep')}", ] _run_and_generate(TEST_NAME, PARTICIPANT_LABEL, parameters, out_dir) -@pytest.mark.qtab +@pytest.mark.qtab() def test_qtab(data_dir, output_dir, working_dir): """Run aslprep on QTAB data. This dataset is Siemens. 
""" - TEST_NAME = "qtab" - PARTICIPANT_LABEL = "01" + TEST_NAME = 'qtab' + PARTICIPANT_LABEL = '01' dataset_dir = download_test_data(TEST_NAME, data_dir) - download_test_data("anatomical", data_dir) - out_dir = os.path.join(output_dir, TEST_NAME, "aslprep") + download_test_data('anatomical', data_dir) + out_dir = os.path.join(output_dir, TEST_NAME, 'aslprep') work_dir = os.path.join(working_dir, TEST_NAME) parameters = [ dataset_dir, out_dir, - "participant", - f"--participant-label={PARTICIPANT_LABEL}", - f"-w={work_dir}", - "--nthreads=1", - "--omp-nthreads=1", - "--output-spaces", - "asl", - "T1w", - "MNI152NLin2009cAsym", - "--scorescrub", - "--use-syn-sdc", - "--force-no-ge", - "--fs-no-reconall", + 'participant', + f'--participant-label={PARTICIPANT_LABEL}', + f'-w={work_dir}', + '--nthreads=1', + '--omp-nthreads=1', + '--output-spaces', + 'asl', + 'T1w', + 'MNI152NLin2009cAsym', + '--scorescrub', + '--use-syn-sdc', + '--force-no-ge', + '--fs-no-reconall', f"--fs-subjects-dir={os.path.join(data_dir, 'anatomical/freesurfer')}", - "--derivatives", + '--derivatives', f"{os.path.join(data_dir, 'anatomical/smriprep')}", ] _run_and_generate(TEST_NAME, PARTICIPANT_LABEL, parameters, out_dir) -@pytest.mark.test_001 +@pytest.mark.test_001() def test_test_001(data_dir, output_dir, working_dir): """Run aslprep on sub-01 data. This dataset is Siemens. """ - TEST_NAME = "test_001" - PARTICIPANT_LABEL = "01" + TEST_NAME = 'test_001' + PARTICIPANT_LABEL = '01' dataset_dir = download_test_data(TEST_NAME, data_dir) - download_test_data("anatomical", data_dir) - out_dir = os.path.join(output_dir, TEST_NAME, "aslprep") + download_test_data('anatomical', data_dir) + out_dir = os.path.join(output_dir, TEST_NAME, 'aslprep') work_dir = os.path.join(working_dir, TEST_NAME) parameters = [ dataset_dir, out_dir, - "participant", - f"--participant-label={PARTICIPANT_LABEL}", - f"-w={work_dir}", - "--nthreads=1", - "--omp-nthreads=1", - "--output-spaces", - "asl", - "T1w", - "MNI152NLin2009cAsym", - "--scorescrub", - "--force-no-ge", - "--fs-no-reconall", + 'participant', + f'--participant-label={PARTICIPANT_LABEL}', + f'-w={work_dir}', + '--nthreads=1', + '--omp-nthreads=1', + '--output-spaces', + 'asl', + 'T1w', + 'MNI152NLin2009cAsym', + '--scorescrub', + '--force-no-ge', + '--fs-no-reconall', f"--fs-subjects-dir={os.path.join(data_dir, 'anatomical/freesurfer')}", - "--derivatives", + '--derivatives', f"{os.path.join(data_dir, 'anatomical/smriprep')}", ] _run_and_generate(TEST_NAME, PARTICIPANT_LABEL, parameters, out_dir) -@pytest.mark.test_002 +@pytest.mark.test_002() def test_test_002(data_dir, output_dir, working_dir): """Run aslprep on sub-01. This dataset contains PCASL data from a GE scanner. There are two ASL volumes (both deltam) and separate M0 scan. 
""" - TEST_NAME = "test_002" - PARTICIPANT_LABEL = "01" + TEST_NAME = 'test_002' + PARTICIPANT_LABEL = '01' dataset_dir = download_test_data(TEST_NAME, data_dir) - download_test_data("anatomical", data_dir) - out_dir = os.path.join(output_dir, TEST_NAME, "aslprep") + download_test_data('anatomical', data_dir) + out_dir = os.path.join(output_dir, TEST_NAME, 'aslprep') work_dir = os.path.join(working_dir, TEST_NAME) parameters = [ dataset_dir, out_dir, - "participant", - f"--participant-label={PARTICIPANT_LABEL}", - f"-w={work_dir}", - "--nthreads=1", - "--omp-nthreads=1", - "--output-spaces", - "asl", - "MNI152NLin2009cAsym", - "--scorescrub", - "--use-syn-sdc", - "--m0_scale=96", - "--force-ge", - "--fs-no-reconall", + 'participant', + f'--participant-label={PARTICIPANT_LABEL}', + f'-w={work_dir}', + '--nthreads=1', + '--omp-nthreads=1', + '--output-spaces', + 'asl', + 'MNI152NLin2009cAsym', + '--scorescrub', + '--use-syn-sdc', + '--m0_scale=96', + '--force-ge', + '--fs-no-reconall', f"--fs-subjects-dir={os.path.join(data_dir, 'anatomical/freesurfer')}", - "--derivatives", + '--derivatives', f"{os.path.join(data_dir, 'anatomical/smriprep')}", ] _run_and_generate(TEST_NAME, PARTICIPANT_LABEL, parameters, out_dir) -@pytest.mark.test_003_minimal +@pytest.mark.test_003_minimal() def test_test_003_minimal(data_dir, output_dir, working_dir): """Run ASLPrep minimal workflow on test_003 dataset.""" base_test_003( data_dir, output_dir, working_dir, - level="minimal", - extra_params=["--fs-no-reconall"], + level='minimal', + extra_params=['--fs-no-reconall'], ) -@pytest.mark.test_003_resampling +@pytest.mark.test_003_resampling() def test_test_003_resampling(data_dir, output_dir, working_dir): """Run ASLPrep resampling workflow on test_003 dataset.""" base_test_003( data_dir, output_dir, working_dir, - level="resampling", - extra_params=["--fs-no-reconall"], + level='resampling', + extra_params=['--fs-no-reconall'], ) -@pytest.mark.test_003_full +@pytest.mark.test_003_full() def test_test_003_full(data_dir, output_dir, working_dir): """Run ASLPrep full workflow on test_003 dataset.""" base_test_003( data_dir, output_dir, working_dir, - level="full", - extra_params=["--cifti-output", "91k"], + level='full', + extra_params=['--cifti-output', '91k'], ) @@ -364,31 +364,31 @@ def base_test_003(data_dir, output_dir, working_dir, level, extra_params): This dataset is Siemens. 
""" - TEST_NAME = "test_003" - PARTICIPANT_LABEL = "01" + TEST_NAME = 'test_003' + PARTICIPANT_LABEL = '01' dataset_dir = download_test_data(TEST_NAME, data_dir) - download_test_data("anatomical", data_dir) - level_test_name = f"{TEST_NAME}_{level}" - out_dir = os.path.join(output_dir, level_test_name, "aslprep") + download_test_data('anatomical', data_dir) + level_test_name = f'{TEST_NAME}_{level}' + out_dir = os.path.join(output_dir, level_test_name, 'aslprep') work_dir = os.path.join(working_dir, level_test_name) parameters = [ dataset_dir, out_dir, - "participant", - f"--participant-label={PARTICIPANT_LABEL}", - f"-w={work_dir}", - "--nthreads=1", - "--omp-nthreads=1", - "--output-spaces", - "asl", - "--use-syn-sdc", - "--m0_scale=10", + 'participant', + f'--participant-label={PARTICIPANT_LABEL}', + f'-w={work_dir}', + '--nthreads=1', + '--omp-nthreads=1', + '--output-spaces', + 'asl', + '--use-syn-sdc', + '--m0_scale=10', f"--fs-subjects-dir={os.path.join(data_dir, 'anatomical/freesurfer')}", - "--derivatives", + '--derivatives', f"{os.path.join(data_dir, 'anatomical/smriprep')}", - f"--level={level}", + f'--level={level}', ] parameters += extra_params @@ -398,38 +398,38 @@ def base_test_003(data_dir, output_dir, working_dir, level, extra_params): def _run_and_generate(test_name, participant_label, parameters, out_dir): from aslprep import config - parameters.append("--stop-on-first-crash") - parameters.append("--clean-workdir") - parameters.append("-vv") + parameters.append('--stop-on-first-crash') + parameters.append('--clean-workdir') + parameters.append('-vv') parse_args(parameters) - config_file = config.execution.work_dir / f"config-{config.execution.run_uuid}.toml" - config.loggers.cli.warning(f"Saving config file to {config_file}") + config_file = config.execution.work_dir / f'config-{config.execution.run_uuid}.toml' + config.loggers.cli.warning(f'Saving config file to {config_file}') config.to_filename(config_file) retval = build_workflow(config_file, retval={}) - aslprep_wf = retval["workflow"] + aslprep_wf = retval['workflow'] aslprep_wf.run() build_boilerplate(str(config_file), aslprep_wf) generate_reports( [participant_label], out_dir, config.execution.run_uuid, - config=load_data("reports-spec.yml"), - packagename="aslprep", + config=load_data('reports-spec.yml'), + packagename='aslprep', ) - output_list_file = os.path.join(get_test_data_path(), f"expected_outputs_{test_name}.txt") + output_list_file = os.path.join(get_test_data_path(), f'expected_outputs_{test_name}.txt') check_generated_files(out_dir, output_list_file) def _run_and_fail(parameters): from aslprep import config - parameters.append("--stop-on-first-crash") - parameters.append("-vv") + parameters.append('--stop-on-first-crash') + parameters.append('-vv') parse_args(parameters) - config_file = config.execution.work_dir / f"config-{config.execution.run_uuid}.toml" + config_file = config.execution.work_dir / f'config-{config.execution.run_uuid}.toml' config.to_filename(config_file) - with pytest.raises(ValueError, match="Multi-delay data are not supported for PASL"): + with pytest.raises(ValueError, match='Multi-delay data are not supported for PASL'): build_workflow(config_file, retval={}) diff --git a/aslprep/tests/test_interfaces_cbf.py b/aslprep/tests/test_interfaces_cbf.py index 6216bc4d6..85f9db580 100644 --- a/aslprep/tests/test_interfaces_cbf.py +++ b/aslprep/tests/test_interfaces_cbf.py @@ -12,49 +12,49 @@ def test_computecbf_casl(datasets, tmp_path_factory): """Test aslprep.interfaces.cbf.ComputeCBF 
with (P)CASL.""" - tmpdir = tmp_path_factory.mktemp("test_computecbf_casl") - aslcontext_file = os.path.join(datasets["test_001"], "sub-01/perf/sub-01_aslcontext.tsv") + tmpdir = tmp_path_factory.mktemp('test_computecbf_casl') + aslcontext_file = os.path.join(datasets['test_001'], 'sub-01/perf/sub-01_aslcontext.tsv') aslcontext = pd.read_table(aslcontext_file) - n_deltam = aslcontext.loc[aslcontext["volume_type"] == "label"].shape[0] + n_deltam = aslcontext.loc[aslcontext['volume_type'] == 'label'].shape[0] n_volumes = aslcontext.shape[0] # Simulate ASL data and a brain mask. asl_data = np.random.random((30, 30, 30, n_deltam)).astype(np.float32) - asl_file = _save_img(asl_data, tmpdir, "asl.nii.gz") + asl_file = _save_img(asl_data, tmpdir, 'asl.nii.gz') asl_mask = np.zeros((30, 30, 30), dtype=np.uint8) asl_mask[10:20, 10:20, 10:20] = 1 - mask_file = _save_img(asl_mask, tmpdir, "mask.nii.gz") - m0_file = _save_img(asl_mask, tmpdir, "m0.nii.gz") + mask_file = _save_img(asl_mask, tmpdir, 'mask.nii.gz') + m0_file = _save_img(asl_mask, tmpdir, 'm0.nii.gz') single_pld = 1.5 plds = np.zeros(n_volumes) temp_plds = np.linspace(0.5, 3.5, n_deltam) - plds[aslcontext["volume_type"] == "m0scan"] = 0 - plds[aslcontext["volume_type"] == "label"] = temp_plds - plds[aslcontext["volume_type"] == "control"] = temp_plds + plds[aslcontext['volume_type'] == 'm0scan'] = 0 + plds[aslcontext['volume_type'] == 'label'] = temp_plds + plds[aslcontext['volume_type'] == 'control'] = temp_plds bad_multiple_plds = plds.tolist() - good_multiple_plds = plds[aslcontext["volume_type"] == "control"] + good_multiple_plds = plds[aslcontext['volume_type'] == 'control'] BASE_METADATA = { - "MagneticFieldStrength": 3, - "LabelingDuration": 1.6, + 'MagneticFieldStrength': 3, + 'LabelingDuration': 1.6, } ACQ_DICTS = [ - {"MRAcquisitionType": "3D"}, + {'MRAcquisitionType': '3D'}, { - "MRAcquisitionType": "2D", - "SliceTiming": list(np.linspace(0.1, 0.5, 30)), + 'MRAcquisitionType': '2D', + 'SliceTiming': list(np.linspace(0.1, 0.5, 30)), }, ] - for asltype in ["PCASL", "CASL"]: + for asltype in ['PCASL', 'CASL']: for acq_dict in ACQ_DICTS: # Scenario 1: PCASL with a single PostLabelingDelay # This should produce CBF time series and mean CBF, but no ATT metadata = { - "ArterialSpinLabelingType": asltype, - "PostLabelingDelay": single_pld, + 'ArterialSpinLabelingType': asltype, + 'PostLabelingDelay': single_pld, **BASE_METADATA, **acq_dict, } @@ -78,8 +78,8 @@ def test_computecbf_casl(datasets, tmp_path_factory): # Scenario 2: PCASL with one PostLabelingDelay for each volume (bad) metadata = { - "ArterialSpinLabelingType": asltype, - "PostLabelingDelay": bad_multiple_plds, + 'ArterialSpinLabelingType': asltype, + 'PostLabelingDelay': bad_multiple_plds, **BASE_METADATA, **acq_dict, } @@ -92,14 +92,14 @@ def test_computecbf_casl(datasets, tmp_path_factory): m0_file=m0_file, mask=mask_file, ) - with pytest.raises(ValueError, match="Number of PostLabelingDelays"): + with pytest.raises(ValueError, match='Number of PostLabelingDelays'): results = interface.run(cwd=tmpdir) # Scenario 3: PCASL with one PostLabelingDelay for each deltam volume (good) # This should produce ATT and mean CBF volumes, but no CBF time series metadata = { - "ArterialSpinLabelingType": asltype, - "PostLabelingDelay": good_multiple_plds, + 'ArterialSpinLabelingType': asltype, + 'PostLabelingDelay': good_multiple_plds, **BASE_METADATA, **acq_dict, } @@ -124,51 +124,51 @@ def test_computecbf_casl(datasets, tmp_path_factory): def test_computecbf_pasl(datasets, 
tmp_path_factory): """Test aslprep.interfaces.cbf.ComputeCBF with PASL.""" - tmpdir = tmp_path_factory.mktemp("test_computecbf_pasl") - aslcontext_file = os.path.join(datasets["test_001"], "sub-01/perf/sub-01_aslcontext.tsv") + tmpdir = tmp_path_factory.mktemp('test_computecbf_pasl') + aslcontext_file = os.path.join(datasets['test_001'], 'sub-01/perf/sub-01_aslcontext.tsv') aslcontext = pd.read_table(aslcontext_file) - n_deltam = aslcontext.loc[aslcontext["volume_type"] == "label"].shape[0] + n_deltam = aslcontext.loc[aslcontext['volume_type'] == 'label'].shape[0] n_volumes = aslcontext.shape[0] # Simulate ASL data and a brain mask. asl_data = np.random.random((30, 30, 30, n_deltam)).astype(np.float32) - asl_file = _save_img(asl_data, tmpdir, "asl.nii.gz") + asl_file = _save_img(asl_data, tmpdir, 'asl.nii.gz') asl_mask = np.zeros((30, 30, 30), dtype=np.uint8) asl_mask[10:20, 10:20, 10:20] = 1 - mask_file = _save_img(asl_mask, tmpdir, "mask.nii.gz") - m0_file = _save_img(asl_mask, tmpdir, "m0.nii.gz") + mask_file = _save_img(asl_mask, tmpdir, 'mask.nii.gz') + m0_file = _save_img(asl_mask, tmpdir, 'm0.nii.gz') single_pld = 1.5 plds = np.zeros(n_volumes) temp_plds = np.linspace(0.5, 3.5, n_deltam) - plds[aslcontext["volume_type"] == "m0scan"] = 0 - plds[aslcontext["volume_type"] == "label"] = temp_plds - plds[aslcontext["volume_type"] == "control"] = temp_plds + plds[aslcontext['volume_type'] == 'm0scan'] = 0 + plds[aslcontext['volume_type'] == 'label'] = temp_plds + plds[aslcontext['volume_type'] == 'control'] = temp_plds bad_multiple_plds = plds.tolist() - good_multiple_plds = plds[aslcontext["volume_type"] == "control"] + good_multiple_plds = plds[aslcontext['volume_type'] == 'control'] BASE_METADATA = { - "ArterialSpinLabelingType": "PASL", - "MagneticFieldStrength": 3, + 'ArterialSpinLabelingType': 'PASL', + 'MagneticFieldStrength': 3, } ACQ_DICTS = [ - {"MRAcquisitionType": "3D"}, + {'MRAcquisitionType': '3D'}, { - "MRAcquisitionType": "2D", - "SliceTiming": list(np.linspace(0.1, 0.5, 30)), + 'MRAcquisitionType': '2D', + 'SliceTiming': list(np.linspace(0.1, 0.5, 30)), }, ] for acq_dict in ACQ_DICTS: # Scenario 1: PASL without BolusCutOff (raises ValueError). 
metadata = { - "BolusCutOffFlag": False, - "PostLabelingDelay": single_pld, + 'BolusCutOffFlag': False, + 'PostLabelingDelay': single_pld, **BASE_METADATA, **acq_dict, } - with pytest.raises(ValueError, match="not supported in ASLPrep."): + with pytest.raises(ValueError, match='not supported in ASLPrep.'): interface = cbf.ComputeCBF( cbf_only=False, deltam=asl_file, @@ -182,10 +182,10 @@ def test_computecbf_pasl(datasets, tmp_path_factory): # Scenario 2: QUIPSS PASL with a single PostLabelingDelay # This should produce CBF time series and mean CBF, but no ATT metadata = { - "BolusCutOffFlag": True, - "BolusCutOffTechnique": "QUIPSS", - "BolusCutOffDelayTime": 0.5, - "PostLabelingDelay": single_pld, + 'BolusCutOffFlag': True, + 'BolusCutOffTechnique': 'QUIPSS', + 'BolusCutOffDelayTime': 0.5, + 'PostLabelingDelay': single_pld, **BASE_METADATA, **acq_dict, } @@ -208,10 +208,10 @@ def test_computecbf_pasl(datasets, tmp_path_factory): # Scenario 3: QUIPSS PASL with one PostLabelingDelay for each volume (bad) metadata = { - "BolusCutOffFlag": True, - "BolusCutOffTechnique": "QUIPSS", - "BolusCutOffDelayTime": 0.5, - "PostLabelingDelay": bad_multiple_plds, + 'BolusCutOffFlag': True, + 'BolusCutOffTechnique': 'QUIPSS', + 'BolusCutOffDelayTime': 0.5, + 'PostLabelingDelay': bad_multiple_plds, **BASE_METADATA, **acq_dict, } @@ -223,15 +223,15 @@ def test_computecbf_pasl(datasets, tmp_path_factory): m0_file=m0_file, mask=mask_file, ) - with pytest.raises(ValueError, match="Multi-delay data are not supported"): + with pytest.raises(ValueError, match='Multi-delay data are not supported'): results = interface.run(cwd=tmpdir) # Scenario 4: QUIPSS PASL with one PostLabelingDelay for each deltam volume (good) metadata = { - "BolusCutOffFlag": True, - "BolusCutOffTechnique": "QUIPSS", - "BolusCutOffDelayTime": 0.5, - "PostLabelingDelay": good_multiple_plds, + 'BolusCutOffFlag': True, + 'BolusCutOffTechnique': 'QUIPSS', + 'BolusCutOffDelayTime': 0.5, + 'PostLabelingDelay': good_multiple_plds, **BASE_METADATA, **acq_dict, } @@ -243,16 +243,16 @@ def test_computecbf_pasl(datasets, tmp_path_factory): m0_file=m0_file, mask=mask_file, ) - with pytest.raises(ValueError, match="Multi-delay data are not supported"): + with pytest.raises(ValueError, match='Multi-delay data are not supported'): results = interface.run(cwd=tmpdir) # Scenario 5: QUIPSSII PASL with one PostLabelingDelay # This should produce CBF time series and mean CBF, but no ATT metadata = { - "BolusCutOffFlag": True, - "BolusCutOffTechnique": "QUIPSSII", - "BolusCutOffDelayTime": 0.5, - "PostLabelingDelay": single_pld, + 'BolusCutOffFlag': True, + 'BolusCutOffTechnique': 'QUIPSSII', + 'BolusCutOffDelayTime': 0.5, + 'PostLabelingDelay': single_pld, **BASE_METADATA, **acq_dict, } @@ -275,10 +275,10 @@ def test_computecbf_pasl(datasets, tmp_path_factory): # Scenario 6: QUIPSSII PASL with multiple PostLabelingDelays metadata = { - "BolusCutOffFlag": True, - "BolusCutOffTechnique": "QUIPSSII", - "BolusCutOffDelayTime": 0.5, - "PostLabelingDelay": good_multiple_plds, + 'BolusCutOffFlag': True, + 'BolusCutOffTechnique': 'QUIPSSII', + 'BolusCutOffDelayTime': 0.5, + 'PostLabelingDelay': good_multiple_plds, **BASE_METADATA, **acq_dict, } @@ -290,16 +290,16 @@ def test_computecbf_pasl(datasets, tmp_path_factory): m0_file=m0_file, mask=mask_file, ) - with pytest.raises(ValueError, match="Multi-delay data are not supported"): + with pytest.raises(ValueError, match='Multi-delay data are not supported'): results = interface.run(cwd=tmpdir) # Scenario 7: Q2TIPS PASL 
with one PostLabelingDelay # This should produce CBF time series and mean CBF, but no ATT metadata = { - "BolusCutOffFlag": True, - "BolusCutOffTechnique": "Q2TIPS", - "BolusCutOffDelayTime": [0.7, 1.6], - "PostLabelingDelay": single_pld, + 'BolusCutOffFlag': True, + 'BolusCutOffTechnique': 'Q2TIPS', + 'BolusCutOffDelayTime': [0.7, 1.6], + 'PostLabelingDelay': single_pld, **BASE_METADATA, **acq_dict, } @@ -322,10 +322,10 @@ def test_computecbf_pasl(datasets, tmp_path_factory): # Scenario 8: Q2TIPS PASL with multiple PostLabelingDelays metadata = { - "BolusCutOffFlag": True, - "BolusCutOffTechnique": "Q2TIPS", - "BolusCutOffDelayTime": [0.7, 1.6], - "PostLabelingDelay": good_multiple_plds, + 'BolusCutOffFlag': True, + 'BolusCutOffTechnique': 'Q2TIPS', + 'BolusCutOffDelayTime': [0.7, 1.6], + 'PostLabelingDelay': good_multiple_plds, **BASE_METADATA, **acq_dict, } @@ -337,7 +337,7 @@ def test_computecbf_pasl(datasets, tmp_path_factory): m0_file=m0_file, mask=mask_file, ) - with pytest.raises(ValueError, match="Multi-delay data are not supported"): + with pytest.raises(ValueError, match='Multi-delay data are not supported'): results = interface.run(cwd=tmpdir) @@ -346,35 +346,35 @@ def test_compare_slicetiming(datasets, tmp_path_factory): As long as the slice times are all zero, of course. """ - tmpdir = tmp_path_factory.mktemp("test_computecbf_casl") - aslcontext_file = os.path.join(datasets["test_001"], "sub-01/perf/sub-01_aslcontext.tsv") + tmpdir = tmp_path_factory.mktemp('test_computecbf_casl') + aslcontext_file = os.path.join(datasets['test_001'], 'sub-01/perf/sub-01_aslcontext.tsv') aslcontext = pd.read_table(aslcontext_file) - n_deltam = aslcontext.loc[aslcontext["volume_type"] == "label"].shape[0] + n_deltam = aslcontext.loc[aslcontext['volume_type'] == 'label'].shape[0] # Simulate ASL data and a brain mask. 
asl_data = np.random.random((30, 30, 30, n_deltam)).astype(np.float32) - asl_file = _save_img(asl_data, tmpdir, "asl.nii.gz") + asl_file = _save_img(asl_data, tmpdir, 'asl.nii.gz') asl_mask = np.zeros((30, 30, 30), dtype=np.uint8) asl_mask[10:20, 10:20, 10:20] = 1 - mask_file = _save_img(asl_mask, tmpdir, "mask.nii.gz") - m0_file = _save_img(asl_mask, tmpdir, "m0.nii.gz") + mask_file = _save_img(asl_mask, tmpdir, 'mask.nii.gz') + m0_file = _save_img(asl_mask, tmpdir, 'm0.nii.gz') ACQ_DICTS = [ - {"MRAcquisitionType": "3D"}, + {'MRAcquisitionType': '3D'}, { - "MRAcquisitionType": "2D", - "SliceTiming": list(np.zeros(30)), + 'MRAcquisitionType': '2D', + 'SliceTiming': list(np.zeros(30)), }, ] cbf_data = [] for acq_dict in ACQ_DICTS: metadata = { - "ArterialSpinLabelingType": "PCASL", - "MagneticFieldStrength": 3, - "LabelingDuration": 1.6, - "PostLabelingDelay": 1.5, + 'ArterialSpinLabelingType': 'PCASL', + 'MagneticFieldStrength': 3, + 'LabelingDuration': 1.6, + 'PostLabelingDelay': 1.5, **acq_dict, } interface = cbf.ComputeCBF( diff --git a/aslprep/tests/test_parser.py b/aslprep/tests/test_parser.py index 7959243e5..3fcafdc38 100644 --- a/aslprep/tests/test_parser.py +++ b/aslprep/tests/test_parser.py @@ -7,16 +7,16 @@ from aslprep.cli import version as _version from aslprep.cli.parser import _build_parser -MIN_ARGS = ["data/", "out/", "participant"] +MIN_ARGS = ['data/', 'out/', 'participant'] @pytest.mark.parametrize( - "args,code", + 'args,code', [ ([], 2), (MIN_ARGS, 2), # bids_dir does not exist - (MIN_ARGS + ["--fs-license-file"], 2), - (MIN_ARGS + ["--fs-license-file", "fslicense.txt"], 2), + (MIN_ARGS + ['--fs-license-file'], 2), + (MIN_ARGS + ['--fs-license-file', 'fslicense.txt'], 2), ], ) def test_parser_errors(args, code): @@ -27,17 +27,17 @@ def test_parser_errors(args, code): assert error.value.code == code -@pytest.mark.parametrize("args", [MIN_ARGS, MIN_ARGS + ["--fs-license-file"]]) +@pytest.mark.parametrize('args', [MIN_ARGS, MIN_ARGS + ['--fs-license-file']]) def test_parser_valid(tmp_path, args): """Check valid arguments.""" - datapath = tmp_path / "data" + datapath = tmp_path / 'data' datapath.mkdir(exist_ok=True) args[0] = str(datapath) - if "--fs-license-file" in args: - _fs_file = tmp_path / "license.txt" - _fs_file.write_text("") - args.insert(args.index("--fs-license-file") + 1, str(_fs_file.absolute())) + if '--fs-license-file' in args: + _fs_file = tmp_path / 'license.txt' + _fs_file.write_text('') + args.insert(args.index('--fs-license-file') + 1, str(_fs_file.absolute())) opts = _build_parser().parse_args(args) @@ -45,36 +45,36 @@ def test_parser_valid(tmp_path, args): @pytest.mark.parametrize( - "argval,gb", + 'argval,gb', [ - ("1G", 1), - ("1GB", 1), - ("1000", 1), # Default units are MB - ("32000", 32), # Default units are MB - ("4000", 4), # Default units are MB - ("1000M", 1), - ("1000MB", 1), - ("1T", 1000), - ("1TB", 1000), - ("%dK" % 1e6, 1), - ("%dKB" % 1e6, 1), - ("%dB" % 1e9, 1), + ('1G', 1), + ('1GB', 1), + ('1000', 1), # Default units are MB + ('32000', 32), # Default units are MB + ('4000', 4), # Default units are MB + ('1000M', 1), + ('1000MB', 1), + ('1T', 1000), + ('1TB', 1000), + ('%dK' % 1e6, 1), + ('%dKB' % 1e6, 1), + ('%dB' % 1e9, 1), ], ) def test_memory_arg(tmp_path, argval, gb): """Check the correct parsing of the memory argument.""" - datapath = tmp_path / "data" + datapath = tmp_path / 'data' datapath.mkdir(exist_ok=True) - _fs_file = tmp_path / "license.txt" - _fs_file.write_text("") + _fs_file = tmp_path / 'license.txt' + 
_fs_file.write_text('') - args = MIN_ARGS + ["--fs-license-file", str(_fs_file)] + ["--mem", argval] + args = MIN_ARGS + ['--fs-license-file', str(_fs_file)] + ['--mem', argval] opts = _build_parser().parse_args(args) assert opts.memory_gb == gb -@pytest.mark.parametrize("current,latest", [("1.0.0", "1.3.2"), ("1.3.2", "1.3.2")]) +@pytest.mark.parametrize('current,latest', [('1.0.0', '1.3.2'), ('1.3.2', '1.3.2')]) def test_get_parser_update(monkeypatch, capsys, current, latest): """Make sure the out-of-date banner is shown.""" expectation = Version(current) < Version(latest) @@ -82,8 +82,8 @@ def test_get_parser_update(monkeypatch, capsys, current, latest): def _mock_check_latest(*args, **kwargs): return Version(latest) - monkeypatch.setattr(config.environment, "version", current) - monkeypatch.setattr(_version, "check_latest", _mock_check_latest) + monkeypatch.setattr(config.environment, 'version', current) + monkeypatch.setattr(_version, 'check_latest', _mock_check_latest) _build_parser() captured = capsys.readouterr().err @@ -96,18 +96,18 @@ def _mock_check_latest(*args, **kwargs): assert (msg in captured) is expectation -@pytest.mark.parametrize("flagged", [(True, None), (True, "random reason"), (False, None)]) +@pytest.mark.parametrize('flagged', [(True, None), (True, 'random reason'), (False, None)]) def test_get_parser_blacklist(monkeypatch, capsys, flagged): """Make sure the blacklisting banner is shown.""" def _mock_is_bl(*args, **kwargs): return flagged - monkeypatch.setattr(_version, "is_flagged", _mock_is_bl) + monkeypatch.setattr(_version, 'is_flagged', _mock_is_bl) _build_parser() captured = capsys.readouterr().err - assert ("FLAGGED" in captured) is flagged[0] + assert ('FLAGGED' in captured) is flagged[0] if flagged[0]: - assert (flagged[1] or "reason: unknown") in captured + assert (flagged[1] or 'reason: unknown') in captured diff --git a/aslprep/tests/test_version.py b/aslprep/tests/test_version.py index 1760f4074..8b8fc47b9 100644 --- a/aslprep/tests/test_version.py +++ b/aslprep/tests/test_version.py @@ -14,7 +14,7 @@ class MockResponse: """Mocks the requests module so that Pypi is not actually queried.""" status_code = 200 - _json = {"releases": {"1.0.0": None, "1.0.1": None, "1.1.0": None, "1.1.1rc1": None}} + _json = {'releases': {'1.0.0': None, '1.0.1': None, '1.1.0': None, '1.1.1rc1': None}} def __init__(self, code=200, json=None): """Allow setting different response codes.""" @@ -28,24 +28,24 @@ def json(self): @pytest.mark.parametrize( - ("result", "code", "json"), + ('result', 'code', 'json'), [ (None, 404, None), - (None, 200, {"releases": {"1.0.0rc1": None}}), - (Version("1.1.0"), 200, None), - (Version("1.0.0"), 200, {"releases": {"1.0.0": None}}), + (None, 200, {'releases': {'1.0.0rc1': None}}), + (Version('1.1.0'), 200, None), + (Version('1.0.0'), 200, {'releases': {'1.0.0': None}}), ], ) def test_check_latest2(tmpdir, monkeypatch, result, code, json): """Test latest version check with varying server responses.""" tmpdir.chdir() - monkeypatch.setenv("HOME", str(tmpdir)) + monkeypatch.setenv('HOME', str(tmpdir)) assert str(Path.home()) == str(tmpdir) def mock_get(*args, **kwargs): return MockResponse(code=code, json=json) - monkeypatch.setattr(requests, "get", mock_get) + monkeypatch.setattr(requests, 'get', mock_get) v = check_latest() if result is None: @@ -56,62 +56,62 @@ def mock_get(*args, **kwargs): @pytest.mark.parametrize( - "bad_cache", + 'bad_cache', [ - "3laj#r???d|3akajdf#", - "2.0.0|3akajdf#", - "|".join(("2.0.0", 
datetime.now().strftime(DATE_FMT), "")), - "", + '3laj#r???d|3akajdf#', + '2.0.0|3akajdf#', + '|'.join(('2.0.0', datetime.now().strftime(DATE_FMT), '')), + '', ], ) def test_check_latest3(tmpdir, monkeypatch, bad_cache): """Test latest version check when the cache file is corrupted.""" tmpdir.chdir() - monkeypatch.setenv("HOME", str(tmpdir)) + monkeypatch.setenv('HOME', str(tmpdir)) assert str(Path.home()) == str(tmpdir) def mock_get(*args, **kwargs): return MockResponse() - monkeypatch.setattr(requests, "get", mock_get) + monkeypatch.setattr(requests, 'get', mock_get) # Initially, cache should not exist - cachefile = Path.home() / ".cache" / "aslprep" / "latest" + cachefile = Path.home() / '.cache' / 'aslprep' / 'latest' cachefile.parent.mkdir(parents=True, exist_ok=True) assert not cachefile.exists() cachefile.write_text(bad_cache) v = check_latest() assert isinstance(v, Version) - assert v == Version("1.1.0") + assert v == Version('1.1.0') @pytest.mark.parametrize( - ("result", "version", "code", "json"), + ('result', 'version', 'code', 'json'), [ - (False, "1.2.1", 200, {"flagged": {"1.0.0": None}}), - (True, "1.2.1", 200, {"flagged": {"1.2.1": None}}), - (True, "1.2.1", 200, {"flagged": {"1.2.1": "FATAL Bug!"}}), - (False, "1.2.1", 404, {"flagged": {"1.0.0": None}}), - (False, "1.2.1", 200, {"flagged": []}), - (False, "1.2.1", 200, {}), + (False, '1.2.1', 200, {'flagged': {'1.0.0': None}}), + (True, '1.2.1', 200, {'flagged': {'1.2.1': None}}), + (True, '1.2.1', 200, {'flagged': {'1.2.1': 'FATAL Bug!'}}), + (False, '1.2.1', 404, {'flagged': {'1.0.0': None}}), + (False, '1.2.1', 200, {'flagged': []}), + (False, '1.2.1', 200, {}), ], ) def test_is_flagged(monkeypatch, result, version, code, json): """Test that the flagged-versions check is correct.""" - monkeypatch.setattr(_version, "__version__", version) + monkeypatch.setattr(_version, '__version__', version) def mock_get(*args, **kwargs): return MockResponse(code=code, json=json) - monkeypatch.setattr(requests, "get", mock_get) + monkeypatch.setattr(requests, 'get', mock_get) val, reason = is_flagged() assert val is result test_reason = None if val: - test_reason = json.get("flagged", {}).get(version, None) + test_reason = json.get('flagged', {}).get(version, None) if test_reason is not None: assert reason == test_reason diff --git a/aslprep/tests/testing.py b/aslprep/tests/testing.py index 0519d0f66..8938accbc 100644 --- a/aslprep/tests/testing.py +++ b/aslprep/tests/testing.py @@ -54,10 +54,10 @@ def get_io_names(pre, ios): actual_outputs = [] node_tuples = [(node.name, node.inputs.items(), node.outputs.items()) for node in nodes] for name, inputs, outputs in node_tuples: - pre = str(name) + "." + pre = str(name) + '.' 
actual_inputs += get_io_names(pre, inputs) - pre = pre if pre[0:-1] != "inputnode" else "" + pre = pre if pre[0:-1] != 'inputnode' else '' actual_outputs += get_io_names(pre, outputs) return actual_inputs, actual_outputs @@ -90,8 +90,8 @@ def assert_inputs_set(self, workflow, additional_inputs=None): """ additional_inputs = additional_inputs or {} - dummy_node = pe.Node(niu.IdentityInterface(fields=["dummy"]), name="DummyNode") - node_names = [name for name in workflow.list_node_names() if name.count(".") == 0] + dummy_node = pe.Node(niu.IdentityInterface(fields=['dummy']), name='DummyNode') + node_names = [name for name in workflow.list_node_names() if name.count('.') == 0] for node_name in set(node_names + list(additional_inputs.keys())): node = workflow.get_node(node_name) mandatory_inputs = list(node.inputs.traits(mandatory=True).keys()) @@ -103,4 +103,4 @@ def assert_inputs_set(self, workflow, additional_inputs=None): # maybe it is connected to an output with self.assertRaises(Exception): # throws an error if the input is already connected - workflow.connect([(dummy_node, node, [("dummy", field)])]) + workflow.connect([(dummy_node, node, [('dummy', field)])]) diff --git a/aslprep/tests/tests.py b/aslprep/tests/tests.py index 1b263101a..e7bd26564 100644 --- a/aslprep/tests/tests.py +++ b/aslprep/tests/tests.py @@ -37,14 +37,14 @@ def mock_config(): """Create a mock config for documentation and testing purposes.""" from aslprep import config - _old_fs = os.getenv("FREESURFER_HOME") + _old_fs = os.getenv('FREESURFER_HOME') if not _old_fs: - os.environ["FREESURFER_HOME"] = mkdtemp() + os.environ['FREESURFER_HOME'] = mkdtemp() - filename = Path(load_data("../tests/data/config.toml")) + filename = Path(load_data('../tests/data/config.toml')) settings = loads(filename.read_text()) for sectionname, configs in settings.items(): - if sectionname != "environment": + if sectionname != 'environment': section = getattr(config, sectionname) section.load(configs, init=False) config.nipype.omp_nthreads = 1 @@ -53,7 +53,7 @@ def mock_config(): config.init_spaces() config.execution.work_dir = Path(mkdtemp()) - config.execution.bids_dir = load_data("../tests/data/ds000240").absolute() + config.execution.bids_dir = load_data('../tests/data/ds000240').absolute() config.execution.aslprep_dir = Path(mkdtemp()) config.execution.init() @@ -63,4 +63,4 @@ def mock_config(): shutil.rmtree(config.execution.aslprep_dir) if not _old_fs: - del os.environ["FREESURFER_HOME"] + del os.environ['FREESURFER_HOME'] diff --git a/aslprep/tests/utils.py b/aslprep/tests/utils.py index db15561a9..d33022cca 100644 --- a/aslprep/tests/utils.py +++ b/aslprep/tests/utils.py @@ -15,30 +15,30 @@ from aslprep import config -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') def download_test_data(dset, data_dir=None): """Download test data.""" URLS = { - "anatomical": ( - "https://upenn.box.com/shared/static/310po9aj05iczko0qvlp21gk19upj82d.tar.gz" + 'anatomical': ( + 'https://upenn.box.com/shared/static/310po9aj05iczko0qvlp21gk19upj82d.tar.gz' ), - "examples_pasl_multipld": ( - "https://upenn.box.com/shared/static/njb5tqs2n53775qumtwc1wyxo5362sp7.tar.gz" + 'examples_pasl_multipld': ( + 'https://upenn.box.com/shared/static/njb5tqs2n53775qumtwc1wyxo5362sp7.tar.gz' ), - "examples_pcasl_multipld": ( - "https://upenn.box.com/shared/static/pm0ysafvg69jimk1bcm3ewtljiwzk899.tar.gz" + 'examples_pcasl_multipld': ( + 'https://upenn.box.com/shared/static/pm0ysafvg69jimk1bcm3ewtljiwzk899.tar.gz' ), - 
"examples_pcasl_singlepld": ( - "https://upenn.box.com/shared/static/il6cfea6f0wjnmjjvcpg6baw3e7yrwa3.tar.gz" + 'examples_pcasl_singlepld': ( + 'https://upenn.box.com/shared/static/il6cfea6f0wjnmjjvcpg6baw3e7yrwa3.tar.gz' ), - "qtab": "https://upenn.box.com/shared/static/ap5ftlc3logivmj03fabuahv4etqk7jf.tar.gz", - "test_001": "https://upenn.box.com/shared/static/cudw5yyh3j6jwymmlzdw2nwc6knmxdu9.tar.gz", - "test_002": "https://upenn.box.com/shared/static/wpuvn06zl4v5nwd9o8tysyfs3kg4a2p0.tar.gz", - "test_003": "https://upenn.box.com/shared/static/1c64kn7btb5dodksnn06wer2kfk00px5.tar.gz", + 'qtab': 'https://upenn.box.com/shared/static/ap5ftlc3logivmj03fabuahv4etqk7jf.tar.gz', + 'test_001': 'https://upenn.box.com/shared/static/cudw5yyh3j6jwymmlzdw2nwc6knmxdu9.tar.gz', + 'test_002': 'https://upenn.box.com/shared/static/wpuvn06zl4v5nwd9o8tysyfs3kg4a2p0.tar.gz', + 'test_003': 'https://upenn.box.com/shared/static/1c64kn7btb5dodksnn06wer2kfk00px5.tar.gz', } - if dset == "*": + if dset == '*': for k in URLS: download_test_data(k, data_dir=data_dir) @@ -48,18 +48,18 @@ def download_test_data(dset, data_dir=None): raise ValueError(f"dset ({dset}) must be one of: {', '.join(URLS.keys())}") if not data_dir: - data_dir = os.path.join(os.path.dirname(get_test_data_path()), "test_data") + data_dir = os.path.join(os.path.dirname(get_test_data_path()), 'test_data') out_dir = os.path.join(data_dir, dset) if os.path.isdir(out_dir): config.loggers.utils.info( - f"Dataset {dset} already exists. " - "If you need to re-download the data, please delete the folder." + f'Dataset {dset} already exists. ' + 'If you need to re-download the data, please delete the folder.' ) return out_dir else: - config.loggers.utils.info(f"Downloading {dset} to {out_dir}") + config.loggers.utils.info(f'Downloading {dset} to {out_dir}') os.makedirs(out_dir, exist_ok=True) with requests.get(URLS[dset], stream=True) as req: @@ -75,21 +75,21 @@ def get_test_data_path(): Test-related data are kept in tests folder in "data". Based on function by Yaroslav Halchenko used in Neurosynth Python package. 
""" - return os.path.abspath(os.path.join(os.path.dirname(__file__), "data") + os.path.sep) + return os.path.abspath(os.path.join(os.path.dirname(__file__), 'data') + os.path.sep) def check_generated_files(aslprep_dir, output_list_file): """Compare files generated by aslprep with a list of expected files.""" - found_files = sorted(glob(os.path.join(aslprep_dir, "**/*"), recursive=True)) + found_files = sorted(glob(os.path.join(aslprep_dir, '**/*'), recursive=True)) found_files = [os.path.relpath(f, aslprep_dir) for f in found_files] # Ignore figures - found_files = [f for f in found_files if "figures" not in f] + found_files = [f for f in found_files if 'figures' not in f] # Ignore logs - found_files = [f for f in found_files if "log" not in f.split(os.path.sep)] + found_files = [f for f in found_files if 'log' not in f.split(os.path.sep)] - with open(output_list_file, "r") as fo: + with open(output_list_file) as fo: expected_files = fo.readlines() expected_files = [f.rstrip() for f in expected_files] @@ -97,14 +97,14 @@ def check_generated_files(aslprep_dir, output_list_file): expected_not_found = sorted(list(set(expected_files) - set(found_files))) found_not_expected = sorted(list(set(found_files) - set(expected_files))) - msg = "" + msg = '' if expected_not_found: - msg += "\nExpected but not found:\n\t" - msg += "\n\t".join(expected_not_found) + msg += '\nExpected but not found:\n\t' + msg += '\n\t'.join(expected_not_found) if found_not_expected: - msg += "\nFound but not expected:\n\t" - msg += "\n\t".join(found_not_expected) + msg += '\nFound but not expected:\n\t' + msg += '\n\t'.join(found_not_expected) raise ValueError(msg) @@ -112,67 +112,67 @@ def check_affines(data_dir, out_dir, input_type): """Confirm affines don't change across XCP runs.""" fmri_layout = BIDSLayout(str(data_dir), validate=False, derivatives=False) xcp_layout = BIDSLayout(str(out_dir), validate=False, derivatives=False) - if input_type == "cifti": # Get the .dtseries.nii + if input_type == 'cifti': # Get the .dtseries.nii denoised_files = xcp_layout.get( - invalid_filters="allow", - datatype="func", + invalid_filters='allow', + datatype='func', run=1, - extension=".dtseries.nii", + extension='.dtseries.nii', ) - space = denoised_files[0].get_entities()["space"] + space = denoised_files[0].get_entities()['space'] bold_files = fmri_layout.get( - invalid_filters="allow", - datatype="func", + invalid_filters='allow', + datatype='func', run=1, space=space, - extension=".dtseries.nii", + extension='.dtseries.nii', ) - elif input_type == "nifti": # Get the .nii.gz + elif input_type == 'nifti': # Get the .nii.gz # Problem: it's collecting native-space data denoised_files = xcp_layout.get( - datatype="func", + datatype='func', run=1, - suffix="bold", - extension=".nii.gz", + suffix='bold', + extension='.nii.gz', ) - space = denoised_files[0].get_entities()["space"] + space = denoised_files[0].get_entities()['space'] bold_files = fmri_layout.get( - invalid_filters="allow", - datatype="func", + invalid_filters='allow', + datatype='func', run=1, space=space, - suffix="bold", - extension=".nii.gz", + suffix='bold', + extension='.nii.gz', ) else: # Nibabies denoised_files = xcp_layout.get( - datatype="func", - space="MNIInfant", - suffix="bold", - extension=".nii.gz", + datatype='func', + space='MNIInfant', + suffix='bold', + extension='.nii.gz', ) bold_files = fmri_layout.get( - invalid_filters="allow", - datatype="func", - space="MNIInfant", - suffix="bold", - extension=".nii.gz", + invalid_filters='allow', + 
datatype='func', + space='MNIInfant', + suffix='bold', + extension='.nii.gz', ) bold_file = bold_files[0].path denoised_file = denoised_files[0].path - if input_type == "cifti": + if input_type == 'cifti': assert ( nb.load(bold_file)._nifti_header.get_intent() == nb.load(denoised_file)._nifti_header.get_intent() ) elif not np.array_equal(nb.load(bold_file).affine, nb.load(denoised_file).affine): - raise AssertionError(f"Affines do not match:\n\t{bold_file}\n\t{denoised_file}") + raise AssertionError(f'Affines do not match:\n\t{bold_file}\n\t{denoised_file}') - print("No affines changed.") + print('No affines changed.') @contextmanager @@ -195,14 +195,14 @@ def reorder_expected_outputs(): This function is called manually by devs when they modify the test outputs. """ test_data_path = get_test_data_path() - expected_output_files = sorted(glob(os.path.join(test_data_path, "expected_outputs_*.txt"))) + expected_output_files = sorted(glob(os.path.join(test_data_path, 'expected_outputs_*.txt'))) for expected_output_file in expected_output_files: - LOGGER.info(f"Sorting {expected_output_file}") + LOGGER.info(f'Sorting {expected_output_file}') - with open(expected_output_file, "r") as fo: + with open(expected_output_file) as fo: file_contents = fo.readlines() file_contents = sorted(file_contents) - with open(expected_output_file, "w") as fo: + with open(expected_output_file, 'w') as fo: fo.writelines(file_contents) diff --git a/aslprep/utils/__init__.py b/aslprep/utils/__init__.py index de0bb9f30..e242da036 100644 --- a/aslprep/utils/__init__.py +++ b/aslprep/utils/__init__.py @@ -3,12 +3,12 @@ from aslprep.utils import asl, atlas, bids, cbf, confounds, misc, plotting, sentry __all__ = [ - "asl", - "atlas", - "bids", - "cbf", - "confounds", - "misc", - "plotting", - "sentry", + 'asl', + 'atlas', + 'bids', + 'cbf', + 'confounds', + 'misc', + 'plotting', + 'sentry', ] diff --git a/aslprep/utils/asl.py b/aslprep/utils/asl.py index 9977c735a..92b812c92 100644 --- a/aslprep/utils/asl.py +++ b/aslprep/utils/asl.py @@ -9,11 +9,11 @@ def pcasl_or_pasl(metadata): """Determine if metadata indicates a PCASL or ASL scan.""" - aslt = metadata["ArterialSpinLabelingType"] + aslt = metadata['ArterialSpinLabelingType'] - if aslt in ["CASL", "PCASL"]: + if aslt in ['CASL', 'PCASL']: is_casl = True - elif aslt == "PASL": + elif aslt == 'PASL': is_casl = False else: raise ValueError( @@ -37,7 +37,7 @@ def determine_multi_pld(metadata): :obj:`bool` True if the data are multi-delay/TI. Fale if not. """ - plds = np.array(metadata["PostLabelingDelay"]) + plds = np.array(metadata['PostLabelingDelay']) return np.unique(plds).size > 1 @@ -50,14 +50,14 @@ def select_processing_target(aslcontext): except: raise FileNotFoundError(aslcontext) - voltypes = aslcontext_df["volume_type"].tolist() + voltypes = aslcontext_df['volume_type'].tolist() - if "control" in voltypes and "label" in voltypes: - processing_target = "control" - elif "deltam" in voltypes: - processing_target = "deltam" - elif "cbf" in voltypes: - processing_target = "cbf" + if 'control' in voltypes and 'label' in voltypes: + processing_target = 'control' + elif 'deltam' in voltypes: + processing_target = 'deltam' + elif 'cbf' in voltypes: + processing_target = 'cbf' else: raise ValueError("aslcontext doesn't have control, label, deltam, or cbf volumes.") @@ -91,26 +91,26 @@ def estimate_labeling_efficiency(metadata): ---------- .. 
footbibliography:: """ - if "LabelingEfficiency" in metadata.keys(): - labeleff = metadata["LabelingEfficiency"] + if 'LabelingEfficiency' in metadata.keys(): + labeleff = metadata['LabelingEfficiency'] else: BASE_LABELEFF = { - "CASL": 0.68, - "PCASL": 0.85, - "PASL": 0.98, + 'CASL': 0.68, + 'PCASL': 0.85, + 'PASL': 0.98, } - labeleff = BASE_LABELEFF[metadata["ArterialSpinLabelingType"]] + labeleff = BASE_LABELEFF[metadata['ArterialSpinLabelingType']] - if metadata.get("BackgroundSuppression", False): + if metadata.get('BackgroundSuppression', False): BS_PULSE_EFF = 0.95 # hardcoded BackgroundSuppressionPulse efficiency # We assume there was one pulse if suppression was applied, # but the number of pulses isn't defined. - labeleff *= BS_PULSE_EFF ** metadata.get("BackgroundSuppressionNumberPulses", 1) + labeleff *= BS_PULSE_EFF ** metadata.get('BackgroundSuppressionNumberPulses', 1) return labeleff -def get_inflow_times(metadata: "dict[str, Any]", is_casl: bool) -> list: +def get_inflow_times(metadata: dict[str, Any], is_casl: bool) -> list: """Determine the appropriate inflow times for BASIL. For PASL data, the inflow time (TI) is just the post-labeling delay (PLD). @@ -131,12 +131,12 @@ def get_inflow_times(metadata: "dict[str, Any]", is_casl: bool) -> list: import numpy as np if is_casl: - return np.add(metadata["PostLabelingDelay"], metadata["LabelingDuration"]).tolist() + return np.add(metadata['PostLabelingDelay'], metadata['LabelingDuration']).tolist() else: - return np.array(metadata["PostLabelingDelay"]).tolist() + return np.array(metadata['PostLabelingDelay']).tolist() -def get_bolus_duration(metadata: "dict[str, Any]", is_casl: bool) -> float: +def get_bolus_duration(metadata: dict[str, Any], is_casl: bool) -> float: """Determine the appropriate bolus duration for BASIL. For PASL data, the bolus cutoff delay is the first BolusCutOffDelayTime. @@ -155,26 +155,26 @@ def get_bolus_duration(metadata: "dict[str, Any]", is_casl: bool) -> float: The bolus value. """ if is_casl: - return metadata["LabelingDuration"] - elif not metadata["BolusCutOffFlag"]: - raise ValueError("PASL without a bolus cutoff technique is not supported.") - elif metadata["BolusCutOffTechnique"] == "Q2TIPS": + return metadata['LabelingDuration'] + elif not metadata['BolusCutOffFlag']: + raise ValueError('PASL without a bolus cutoff technique is not supported.') + elif metadata['BolusCutOffTechnique'] == 'Q2TIPS': # BolusCutOffDelayTime is a list, and the first entry should be used. - return metadata["BolusCutOffDelayTime"][0] + return metadata['BolusCutOffDelayTime'][0] else: # QUIPSS or QUIPSSII - return metadata["BolusCutOffDelayTime"] + return metadata['BolusCutOffDelayTime'] def reduce_metadata_lists(metadata, n_volumes, keep_idx): """Reduce any volume-wise metadata fields to only contain values for selected volumes.""" # A hardcoded list of fields that may have one value for each volume. VOLUME_WISE_FIELDS = [ - "PostLabelingDelay", - "VascularCrushingVENC", - "LabelingDuration", - "EchoTime", - "FlipAngle", - "RepetitionTimePreparation", + 'PostLabelingDelay', + 'VascularCrushingVENC', + 'LabelingDuration', + 'EchoTime', + 'FlipAngle', + 'RepetitionTimePreparation', ] for field in VOLUME_WISE_FIELDS: diff --git a/aslprep/utils/atlas.py b/aslprep/utils/atlas.py index 982ab5595..b2c4e5d18 100644 --- a/aslprep/utils/atlas.py +++ b/aslprep/utils/atlas.py @@ -23,26 +23,26 @@ def get_atlas_names(subset): List of atlases. 
""" atlases = { - "cortical": [ - "4S156Parcels", - "4S256Parcels", - "4S356Parcels", - "4S456Parcels", - "4S556Parcels", - "4S656Parcels", - "4S756Parcels", - "4S856Parcels", - "4S956Parcels", - "4S1056Parcels", - "Glasser", - "Gordon", + 'cortical': [ + '4S156Parcels', + '4S256Parcels', + '4S356Parcels', + '4S456Parcels', + '4S556Parcels', + '4S656Parcels', + '4S756Parcels', + '4S856Parcels', + '4S956Parcels', + '4S1056Parcels', + 'Glasser', + 'Gordon', ], - "subcortical": [ - "Tian", - "HCP", + 'subcortical': [ + 'Tian', + 'HCP', ], } - atlases["all"] = sorted(list(set(atlases["cortical"] + atlases["subcortical"]))) + atlases['all'] = sorted(list(set(atlases['cortical'] + atlases['subcortical']))) return atlases[subset] @@ -74,29 +74,29 @@ def get_atlas_nifti(atlas_name): from aslprep.data import load as load_data - if "4S" in atlas_name or atlas_name in ("Glasser", "Gordon"): + if '4S' in atlas_name or atlas_name in ('Glasser', 'Gordon'): # 1 mm3 atlases - atlas_fname = f"tpl-MNI152NLin6Asym_atlas-{atlas_name}_res-01_dseg.nii.gz" - tsv_fname = f"atlas-{atlas_name}_dseg.tsv" + atlas_fname = f'tpl-MNI152NLin6Asym_atlas-{atlas_name}_res-01_dseg.nii.gz' + tsv_fname = f'atlas-{atlas_name}_dseg.tsv' else: # 2 mm3 atlases - atlas_fname = f"tpl-MNI152NLin6Asym_atlas-{atlas_name}_res-02_dseg.nii.gz" - tsv_fname = f"atlas-{atlas_name}_dseg.tsv" + atlas_fname = f'tpl-MNI152NLin6Asym_atlas-{atlas_name}_res-02_dseg.nii.gz' + tsv_fname = f'atlas-{atlas_name}_dseg.tsv' - if "4S" in atlas_name: - atlas_file = join("/AtlasPack", atlas_fname) - atlas_labels_file = join("/AtlasPack", tsv_fname) - atlas_metadata_file = f"/AtlasPack/tpl-MNI152NLin6Asym_atlas-{atlas_name}_dseg.json" + if '4S' in atlas_name: + atlas_file = join('/AtlasPack', atlas_fname) + atlas_labels_file = join('/AtlasPack', tsv_fname) + atlas_metadata_file = f'/AtlasPack/tpl-MNI152NLin6Asym_atlas-{atlas_name}_dseg.json' else: - atlas_file = load_data(f"atlases/{atlas_fname}").absolute() - atlas_labels_file = load_data(f"atlases/{tsv_fname}").absolute() + atlas_file = load_data(f'atlases/{atlas_fname}').absolute() + atlas_labels_file = load_data(f'atlases/{tsv_fname}').absolute() atlas_metadata_file = load_data( - f"atlases/tpl-MNI152NLin6Asym_atlas-{atlas_name}_dseg.json", + f'atlases/tpl-MNI152NLin6Asym_atlas-{atlas_name}_dseg.json', ).absolute() if not (isfile(atlas_file) and isfile(atlas_labels_file) and isfile(atlas_metadata_file)): raise FileNotFoundError( - f"File(s) DNE:\n\t{atlas_file}\n\t{atlas_labels_file}\n\t{atlas_metadata_file}" + f'File(s) DNE:\n\t{atlas_file}\n\t{atlas_labels_file}\n\t{atlas_metadata_file}' ) return atlas_file, atlas_labels_file, atlas_metadata_file diff --git a/aslprep/utils/bids.py b/aslprep/utils/bids.py index efaa78460..873e143d3 100644 --- a/aslprep/utils/bids.py +++ b/aslprep/utils/bids.py @@ -6,7 +6,6 @@ import json import os import sys -import typing as ty from collections import defaultdict from pathlib import Path @@ -24,13 +23,13 @@ def collect_data( ): """Use pybids to retrieve the input data for a given participant.""" queries = { - "fmap": {"datatype": "fmap"}, - "flair": {"datatype": "anat", "suffix": "FLAIR"}, - "t2w": {"datatype": "anat", "suffix": "T2w"}, - "t1w": {"datatype": "anat", "suffix": "T1w"}, - "roi": {"datatype": "anat", "suffix": "roi"}, - "sbref": {"datatype": "perf", "suffix": "sbref"}, - "asl": {"datatype": "perf", "suffix": "asl"}, + 'fmap': {'datatype': 'fmap'}, + 'flair': {'datatype': 'anat', 'suffix': 'FLAIR'}, + 't2w': {'datatype': 'anat', 'suffix': 'T2w'}, + 't1w': 
{'datatype': 'anat', 'suffix': 'T1w'}, + 'roi': {'datatype': 'anat', 'suffix': 'roi'}, + 'sbref': {'datatype': 'perf', 'suffix': 'sbref'}, + 'asl': {'datatype': 'perf', 'suffix': 'asl'}, } bids_filters = bids_filters or {} @@ -40,9 +39,9 @@ def collect_data( subj_data = { dtype: sorted( layout.get( - return_type="file", + return_type='file', subject=participant_label, - extension=["nii", "nii.gz"], + extension=['nii', 'nii.gz'], **query, ) ) @@ -55,8 +54,8 @@ def collect_data( def collect_run_data(layout, asl_file): """Use pybids to retrieve the input data for a given participant.""" queries = { - "aslcontext": {"suffix": "aslcontext", "extension": ".tsv"}, - "sbref": {"suffix": "sbref", "extension": [".nii", ".nii.gz"]}, + 'aslcontext': {'suffix': 'aslcontext', 'extension': '.tsv'}, + 'sbref': {'suffix': 'sbref', 'extension': ['.nii', '.nii.gz']}, } bids_file = layout.get_file(asl_file) @@ -64,46 +63,46 @@ def collect_run_data(layout, asl_file): run_data = { dtype: layout.get_nearest( bids_file.path, - return_type="file", + return_type='file', strict=False, # aslcontext files aren't grabbed when strict=True, for some reason **query, ) for dtype, query in queries.items() } - if "sbref" in config.workflow.ignore: - config.loggers.workflow.info("Single-band reference files ignored.") - run_data["sbref"] = None + if 'sbref' in config.workflow.ignore: + config.loggers.workflow.info('Single-band reference files ignored.') + run_data['sbref'] = None # The aslcontext file is required - if not run_data["aslcontext"]: - raise FileNotFoundError(f"aslcontext file for {asl_file} not found.") + if not run_data['aslcontext']: + raise FileNotFoundError(f'aslcontext file for {asl_file} not found.') # Now let's look for an m0scan m0scan_candidates = [ f - for f in bids_file.get_associations(kind="InformedBy") - if f.entities["suffix"] == "m0scan" + for f in bids_file.get_associations(kind='InformedBy') + if f.entities['suffix'] == 'm0scan' ] if m0scan_candidates: if len(m0scan_candidates) > 1: config.loggers.workflow.warning( - f"More than one M0 file found for {asl_file}. " - f"Using the first one ({m0scan_candidates[0].path})" + f'More than one M0 file found for {asl_file}. 
' + f'Using the first one ({m0scan_candidates[0].path})' ) - run_data["m0scan"] = m0scan_candidates[0].path + run_data['m0scan'] = m0scan_candidates[0].path else: - run_data["m0scan"] = None + run_data['m0scan'] = None m0scan_metadata = None asl_metadata = layout.get_metadata(asl_file) - if (asl_metadata["M0Type"] == "Separate") and not run_data["m0scan"]: - raise FileNotFoundError(f"M0 file for {asl_file} not found.") - elif asl_metadata["M0Type"] == "Separate": - m0scan_metadata = layout.get_file(run_data["m0scan"]).get_metadata() + if (asl_metadata['M0Type'] == 'Separate') and not run_data['m0scan']: + raise FileNotFoundError(f'M0 file for {asl_file} not found.') + elif asl_metadata['M0Type'] == 'Separate': + m0scan_metadata = layout.get_file(run_data['m0scan']).get_metadata() if not m0scan_metadata: raise Exception(f"No metadata for m0scan: {run_data['m0scan']}") - elif run_data["m0scan"]: + elif run_data['m0scan']: raise ValueError( f"M0Type is {run_data['asl_metadata']['M0Type']}, " f"but an M0 scan was found at {run_data['m0scan']}" @@ -111,14 +110,14 @@ def collect_run_data(layout, asl_file): config.loggers.workflow.info( ( - f"Collected run data for {asl_file}:\n" - f"{yaml.dump(run_data, default_flow_style=False, indent=4)}" + f'Collected run data for {asl_file}:\n' + f'{yaml.dump(run_data, default_flow_style=False, indent=4)}' ), ) # Add metadata to dictionary now (we don't want to print these with the logger). - run_data["asl_metadata"] = asl_metadata - run_data["m0scan_metadata"] = m0scan_metadata + run_data['asl_metadata'] = asl_metadata + run_data['m0scan_metadata'] = m0scan_metadata return run_data @@ -128,12 +127,12 @@ def collect_derivatives( entities: dict, fieldmap_id: str | None, spec: dict | None = None, - patterns: ty.List[str] | None = None, + patterns: list[str] | None = None, ): """Gather existing derivatives and compose a cache.""" if spec is None or patterns is None: _spec, _patterns = tuple( - json.loads(load_data.readable("io_spec.json").read_text()).values() + json.loads(load_data.readable('io_spec.json').read_text()).values() ) if spec is None: @@ -142,22 +141,22 @@ def collect_derivatives( patterns = _patterns derivs_cache = defaultdict(list, {}) - layout = BIDSLayout(derivatives_dir, config=["bids", "derivatives"], validate=False) + layout = BIDSLayout(derivatives_dir, config=['bids', 'derivatives'], validate=False) derivatives_dir = Path(derivatives_dir) # search for both aslrefs - for k, q in spec["baseline"].items(): + for k, q in spec['baseline'].items(): query = {**q, **entities} - item = layout.get(return_type="filename", **query) + item = layout.get(return_type='filename', **query) if not item: continue - derivs_cache[f"{k}_aslref"] = item[0] if len(item) == 1 else item + derivs_cache[f'{k}_aslref'] = item[0] if len(item) == 1 else item - for xfm, q in spec["transforms"].items(): + for xfm, q in spec['transforms'].items(): query = {**q, **entities} - if xfm == "aslref2fmap": - query["to"] = fieldmap_id - item = layout.get(return_type="filename", **q) + if xfm == 'aslref2fmap': + query['to'] = fieldmap_id + item = layout.get(return_type='filename', **q) if not item: continue derivs_cache[xfm] = item[0] if len(item) == 1 else item @@ -167,20 +166,20 @@ def collect_derivatives( def write_bidsignore(deriv_dir): """Write .bidsignore file.""" bids_ignore = ( - "*.html", - "logs/", - "figures/", # Reports - "*_xfm.*", # Unspecified transform files - "*.surf.gii", # Unspecified structural outputs + '*.html', + 'logs/', + 'figures/', # Reports + '*_xfm.*', # 
Unspecified transform files + '*.surf.gii', # Unspecified structural outputs # Unspecified functional outputs - "*_aslref.nii.gz", - "*_asl.func.gii", - "*_mixing.tsv", - "*_timeseries.tsv", + '*_aslref.nii.gz', + '*_asl.func.gii', + '*_mixing.tsv', + '*_timeseries.tsv', ) - ignore_file = Path(deriv_dir) / ".bidsignore" + ignore_file = Path(deriv_dir) / '.bidsignore' - ignore_file.write_text("\n".join(bids_ignore) + "\n") + ignore_file.write_text('\n'.join(bids_ignore) + '\n') def write_derivative_description(bids_dir, deriv_dir): @@ -190,45 +189,45 @@ def write_derivative_description(bids_dir, deriv_dir): bids_dir = Path(bids_dir) deriv_dir = Path(deriv_dir) desc = { - "Name": "ASLPrep - ASL PREProcessing workflow", - "BIDSVersion": "1.9.0", - "PipelineDescription": { - "Name": "ASLPrep", - "Version": __version__, - "CodeURL": DOWNLOAD_URL, + 'Name': 'ASLPrep - ASL PREProcessing workflow', + 'BIDSVersion': '1.9.0', + 'PipelineDescription': { + 'Name': 'ASLPrep', + 'Version': __version__, + 'CodeURL': DOWNLOAD_URL, }, - "CodeURL": __url__, - "HowToAcknowledge": "Please cite our paper " - "and include the generated citation boilerplate within the Methods " - "section of the text.", + 'CodeURL': __url__, + 'HowToAcknowledge': 'Please cite our paper ' + 'and include the generated citation boilerplate within the Methods ' + 'section of the text.', } # Keys that can only be set by environment - if "ASLPREP_DOCKER_TAG" in os.environ: - desc["DockerHubContainerTag"] = os.environ["ASLPREP_DOCKER_TAG"] + if 'ASLPREP_DOCKER_TAG' in os.environ: + desc['DockerHubContainerTag'] = os.environ['ASLPREP_DOCKER_TAG'] - if "ASLPREP_SINGULARITY_URL" in os.environ: - singularity_url = os.environ["ASLPREP_SINGULARITY_URL"] - desc["SingularityContainerURL"] = singularity_url + if 'ASLPREP_SINGULARITY_URL' in os.environ: + singularity_url = os.environ['ASLPREP_SINGULARITY_URL'] + desc['SingularityContainerURL'] = singularity_url singularity_md5 = _get_shub_version(singularity_url) if singularity_md5 and singularity_md5 is not NotImplemented: - desc["SingularityContainerMD5"] = _get_shub_version(singularity_url) + desc['SingularityContainerMD5'] = _get_shub_version(singularity_url) # Keys deriving from source dataset orig_desc = {} - fname = bids_dir / "dataset_description.json" + fname = bids_dir / 'dataset_description.json' if fname.exists(): with fname.open() as fobj: orig_desc = json.load(fobj) - if "DatasetDOI" in orig_desc: - desc["SourceDatasetsURLs"] = [f"https://doi.org/{orig_desc['DatasetDOI']}"] + if 'DatasetDOI' in orig_desc: + desc['SourceDatasetsURLs'] = [f"https://doi.org/{orig_desc['DatasetDOI']}"] - if "License" in orig_desc: - desc["License"] = orig_desc["License"] + if 'License' in orig_desc: + desc['License'] = orig_desc['License'] - with (deriv_dir / "dataset_description.json").open("w") as fobj: + with (deriv_dir / 'dataset_description.json').open('w') as fobj: json.dump(desc, fobj, indent=4) @@ -242,93 +241,93 @@ def validate_input_dir(exec_env, bids_dir, participant_label): import tempfile validator_config_dict = { - "ignore": [ - "EVENTS_COLUMN_ONSET", - "EVENTS_COLUMN_DURATION", - "TSV_EQUAL_ROWS", - "TSV_EMPTY_CELL", - "TSV_IMPROPER_NA", - "VOLUME_COUNT_MISMATCH", - "BVAL_MULTIPLE_ROWS", - "BVEC_NUMBER_ROWS", - "DWI_MISSING_BVAL", - "INCONSISTENT_SUBJECTS", - "INCONSISTENT_PARAMETERS", - "BVEC_ROW_LENGTH", - "B_FILE", - "PARTICIPANT_ID_COLUMN", - "PARTICIPANT_ID_MISMATCH", - "TASK_NAME_MUST_DEFINE", - "PHENOTYPE_SUBJECTS_MISSING", - "STIMULUS_FILE_MISSING", - "DWI_MISSING_BVEC", - 
"EVENTS_TSV_MISSING", - "TSV_IMPROPER_NA", - "ACQTIME_FMT", - "Participants age 89 or higher", - "DATASET_DESCRIPTION_JSON_MISSING", - "FILENAME_COLUMN", - "WRONG_NEW_LINE", - "MISSING_TSV_COLUMN_CHANNELS", - "MISSING_TSV_COLUMN_IEEG_CHANNELS", - "MISSING_TSV_COLUMN_IEEG_ELECTRODES", - "UNUSED_STIMULUS", - "CHANNELS_COLUMN_SFREQ", - "CHANNELS_COLUMN_LOWCUT", - "CHANNELS_COLUMN_HIGHCUT", - "CHANNELS_COLUMN_NOTCH", - "CUSTOM_COLUMN_WITHOUT_DESCRIPTION", - "ACQTIME_FMT", - "SUSPICIOUSLY_LONG_EVENT_DESIGN", - "SUSPICIOUSLY_SHORT_EVENT_DESIGN", - "MALFORMED_BVEC", - "MALFORMED_BVAL", - "MISSING_TSV_COLUMN_EEG_ELECTRODES", - "MISSING_SESSION", + 'ignore': [ + 'EVENTS_COLUMN_ONSET', + 'EVENTS_COLUMN_DURATION', + 'TSV_EQUAL_ROWS', + 'TSV_EMPTY_CELL', + 'TSV_IMPROPER_NA', + 'VOLUME_COUNT_MISMATCH', + 'BVAL_MULTIPLE_ROWS', + 'BVEC_NUMBER_ROWS', + 'DWI_MISSING_BVAL', + 'INCONSISTENT_SUBJECTS', + 'INCONSISTENT_PARAMETERS', + 'BVEC_ROW_LENGTH', + 'B_FILE', + 'PARTICIPANT_ID_COLUMN', + 'PARTICIPANT_ID_MISMATCH', + 'TASK_NAME_MUST_DEFINE', + 'PHENOTYPE_SUBJECTS_MISSING', + 'STIMULUS_FILE_MISSING', + 'DWI_MISSING_BVEC', + 'EVENTS_TSV_MISSING', + 'TSV_IMPROPER_NA', + 'ACQTIME_FMT', + 'Participants age 89 or higher', + 'DATASET_DESCRIPTION_JSON_MISSING', + 'FILENAME_COLUMN', + 'WRONG_NEW_LINE', + 'MISSING_TSV_COLUMN_CHANNELS', + 'MISSING_TSV_COLUMN_IEEG_CHANNELS', + 'MISSING_TSV_COLUMN_IEEG_ELECTRODES', + 'UNUSED_STIMULUS', + 'CHANNELS_COLUMN_SFREQ', + 'CHANNELS_COLUMN_LOWCUT', + 'CHANNELS_COLUMN_HIGHCUT', + 'CHANNELS_COLUMN_NOTCH', + 'CUSTOM_COLUMN_WITHOUT_DESCRIPTION', + 'ACQTIME_FMT', + 'SUSPICIOUSLY_LONG_EVENT_DESIGN', + 'SUSPICIOUSLY_SHORT_EVENT_DESIGN', + 'MALFORMED_BVEC', + 'MALFORMED_BVAL', + 'MISSING_TSV_COLUMN_EEG_ELECTRODES', + 'MISSING_SESSION', ], - "error": ["NO_T1W"], - "ignoredFiles": ["/dataset_description.json", "/participants.tsv"], + 'error': ['NO_T1W'], + 'ignoredFiles': ['/dataset_description.json', '/participants.tsv'], } # Limit validation only to data from requested participants if participant_label: - all_subs = {s.name[4:] for s in bids_dir.glob("sub-*")} - selected_subs = {s[4:] if s.startswith("sub-") else s for s in participant_label} + all_subs = {s.name[4:] for s in bids_dir.glob('sub-*')} + selected_subs = {s[4:] if s.startswith('sub-') else s for s in participant_label} if bad_labels := selected_subs.difference(all_subs): error_msg = ( "Data for requested participant(s) label(s) not found. " f"Could not find data for participant(s): {','.join(bad_labels)}. " "Please verify the requested participant labels." ) - if exec_env == "docker": + if exec_env == 'docker': error_msg += ( - " This error can be caused by the input data not being " - "accessible inside the docker container. " - "Please make sure all volumes are mounted properly " - "(see https://docs.docker.com/engine/reference/commandline/" - "run/#mount-volume--v---read-only)" + ' This error can be caused by the input data not being ' + 'accessible inside the docker container. ' + 'Please make sure all volumes are mounted properly ' + '(see https://docs.docker.com/engine/reference/commandline/' + 'run/#mount-volume--v---read-only)' ) - elif exec_env == "singularity": + elif exec_env == 'singularity': error_msg += ( - " This error can be caused by the input data not being " - "accessible inside the singularity container. 
" - "Please make sure all paths are mapped properly " - "(see https://www.sylabs.io/guides/3.0/user-guide/bind_paths_and_mounts.html)" + ' This error can be caused by the input data not being ' + 'accessible inside the singularity container. ' + 'Please make sure all paths are mapped properly ' + '(see https://www.sylabs.io/guides/3.0/user-guide/bind_paths_and_mounts.html)' ) raise RuntimeError(error_msg) if ignored_subs := all_subs.difference(selected_subs): for sub in ignored_subs: - validator_config_dict["ignoredFiles"].append(f"/sub-{sub}/**") + validator_config_dict['ignoredFiles'].append(f'/sub-{sub}/**') - with tempfile.NamedTemporaryFile("w+") as temp: + with tempfile.NamedTemporaryFile('w+') as temp: temp.write(json.dumps(validator_config_dict)) temp.flush() try: - subprocess.check_call(["bids-validator", bids_dir, "-c", temp.name]) + subprocess.check_call(['bids-validator', bids_dir, '-c', temp.name]) except FileNotFoundError: - print("bids-validator does not appear to be installed", file=sys.stderr) + print('bids-validator does not appear to be installed', file=sys.stderr) def _get_shub_version(singularity_url): # noqa: U100 @@ -340,19 +339,19 @@ def find_atlas_entities(filename): import os fname = os.path.basename(filename) - elements = fname.split("_") + elements = fname.split('_') out = [] - for ent in ("tpl", "atlas", "res"): - ent_parts = [el for el in elements if el.startswith(f"{ent}-")] + for ent in ('tpl', 'atlas', 'res'): + ent_parts = [el for el in elements if el.startswith(f'{ent}-')] ent_value = None if ent_parts: - ent_value = ent_parts[0].split("-")[1] + ent_value = ent_parts[0].split('-')[1] out.append(ent_value) - suffix = elements[-1].split(".")[0] - extension = "." + ".".join(elements[-1].split(".")[1:]) + suffix = elements[-1].split('.')[0] + extension = '.' + '.'.join(elements[-1].split('.')[1:]) out += [suffix, extension] return tuple(out) diff --git a/aslprep/utils/cbf.py b/aslprep/utils/cbf.py index e387f54f4..6cefd5e10 100644 --- a/aslprep/utils/cbf.py +++ b/aslprep/utils/cbf.py @@ -9,27 +9,27 @@ from aslprep import config -def _weightfun(x, wfun="huber"): +def _weightfun(x, wfun='huber'): """Get weight fun and tuner.""" - if wfun == "andrews": + if wfun == 'andrews': tuner = 1.339 weight = (np.abs(x) < np.pi) * np.sin(x) - elif wfun == "bisquare": + elif wfun == 'bisquare': tuner = 4.685 weight = (np.abs(x) < 1) * np.power((1 - np.power(x, 2)), 2) - elif wfun == "cauchy": + elif wfun == 'cauchy': tuner = 2.385 weight = 1 / (1 + np.power(x, 2)) - elif wfun == "logistic": + elif wfun == 'logistic': tuner = 1.205 weight = np.tanh(x) / x - elif wfun == "ols": + elif wfun == 'ols': tuner = 1 weight = np.repeat(1, len(x)) - elif wfun == "talwar": + elif wfun == 'talwar': tuner = 2.795 weight = 1 * (np.abs(x) < 1) - elif wfun == "welsch": + elif wfun == 'welsch': tuner = 2.985 weight = np.exp(-(np.power(x, 2))) else: @@ -38,24 +38,24 @@ def _weightfun(x, wfun="huber"): return weight, tuner -def _tune(wfun="huber"): +def _tune(wfun='huber'): """Get weight fun and tuner. But wait, you might say, the docstring makes no sense! Correct. 
""" - if wfun == "andrews": + if wfun == 'andrews': tuner = 1.339 - elif wfun == "bisquare": + elif wfun == 'bisquare': tuner = 4.685 - elif wfun == "cauchy": + elif wfun == 'cauchy': tuner = 2.385 - elif wfun == "logistic": + elif wfun == 'logistic': tuner = 1.205 - elif wfun == "ols": + elif wfun == 'ols': tuner = 1 - elif wfun == "talwar": + elif wfun == 'talwar': tuner = 2.795 - elif wfun == "welsch": + elif wfun == 'welsch': tuner = 2.985 else: tuner = 1.345 @@ -329,7 +329,7 @@ def _getcbfscore(cbfts, wm, gm, csf, mask, thresh=0.7): + (n_csf_voxels * np.var(R[csf == 1])) ) - config.loggers.utils.warning(f"SCORE retains {np.sum(indx == 0)}/{indx.size} volumes") + config.loggers.utils.warning(f'SCORE retains {np.sum(indx == 0)}/{indx.size} volumes') cbfts_recon = cbfts[:, :, :, indx == 0] cbfts_recon1 = np.zeros_like(cbfts_recon) for i in range(cbfts_recon.shape[3]): @@ -346,7 +346,7 @@ def _robust_fit( modrobprior, lmd=0, localprior=0, - wfun="huber", + wfun='huber', tune=1.345, flagstd=1, flagmodrobust=1, @@ -370,12 +370,12 @@ def _robust_fit( iter_num = 0 interlim = 10 while iter_num < interlim: - print("iteration ", iter_num, "\n") + print('iteration ', iter_num, '\n') iter_num = iter_num + 1 check1 = np.subtract(np.abs(b - b0), (D * np.maximum(np.abs(b), np.abs(b0)))) check1[check1 > 0] = 0 if any(check1): - print(" \n converged after ", iter_num, "iterations\n") + print(' \n converged after ', iter_num, 'iterations\n') break r = Y - X * (np.tile(b, (dimcbf[0], 1))) radj = r * adjfactor / sw @@ -399,7 +399,7 @@ def _robust_fit( return b -def _scrubcbf(cbf_ts, gm, wm, csf, mask, wfun="huber", thresh=0.7): +def _scrubcbf(cbf_ts, gm, wm, csf, mask, wfun='huber', thresh=0.7): """Apply SCRUB algorithm to CBF data. Parameters @@ -555,10 +555,10 @@ def estimate_att_pcasl(deltam_arr, plds, lds, t1blood, t1tissue): .. footbibliography:: """ n_voxels, n_plds = plds.shape - assert deltam_arr.shape == plds.shape, f"{deltam_arr.shape} != {plds.shape}" + assert deltam_arr.shape == plds.shape, f'{deltam_arr.shape} != {plds.shape}' # Beginning of auxil_asl_gen_wsum - assert lds.size == n_plds, f"{lds.size} != {n_plds}" + assert lds.size == n_plds, f'{lds.size} != {n_plds}' att_arr = np.empty(n_voxels) for i_voxel in range(n_voxels): @@ -572,8 +572,8 @@ def estimate_att_pcasl(deltam_arr, plds, lds, t1blood, t1tissue): 0.001, ) - sig_sum = np.zeros((transit_times.size)) - sig_pld_sum = np.zeros((transit_times.size)) + sig_sum = np.zeros(transit_times.size) + sig_pld_sum = np.zeros(transit_times.size) for j_pld in range(n_plds): pld = plds_by_voxel[j_pld] @@ -699,8 +699,8 @@ def estimate_cbf_pcasl_multipld( if n_plds != n_volumes: raise ValueError( - f"Number of PostLabelingDelays ({n_plds}) does not match " - f"number of delta-M volumes ({n_volumes})." + f'Number of PostLabelingDelays ({n_plds}) does not match ' + f'number of delta-M volumes ({n_volumes}).' 
) # Formula from Fan 2017 (equation 2) @@ -713,8 +713,8 @@ def estimate_cbf_pcasl_multipld( if tau.size > 1: if tau.size != n_plds: raise ValueError( - f"Number of LabelingDurations ({tau.size}) != " - f"number of PostLabelingDelays ({n_plds})" + f'Number of LabelingDurations ({tau.size}) != ' + f'number of PostLabelingDelays ({n_plds})' ) tau = tau[unique_pld_idx] @@ -799,13 +799,13 @@ def estimate_t1(metadata): 3: 1.65, 7: 2.087, } - t1blood = T1BLOOD_DICT.get(metadata["MagneticFieldStrength"]) + t1blood = T1BLOOD_DICT.get(metadata['MagneticFieldStrength']) if not t1blood: config.loggers.interface.warning( f"T1blood cannot be inferred for {metadata['MagneticFieldStrength']}T data. " "Defaulting to formula from Zhang et al. (2013)." ) - t1blood = (110 * metadata["MagneticFieldStrength"] + 1316) / 1000 + t1blood = (110 * metadata['MagneticFieldStrength'] + 1316) / 1000 # TODO: Supplement with formula for other field strengths T1TISSUE_DICT = { @@ -813,7 +813,7 @@ def estimate_t1(metadata): 3: 1.607, 7: 1.939, } - t1tissue = T1TISSUE_DICT.get(metadata["MagneticFieldStrength"]) + t1tissue = T1TISSUE_DICT.get(metadata['MagneticFieldStrength']) if not t1tissue: raise ValueError( f"T1tissue cannot be inferred for {metadata['MagneticFieldStrength']}T data." diff --git a/aslprep/utils/confounds.py b/aslprep/utils/confounds.py index 49aa097b6..758b2f3cf 100644 --- a/aslprep/utils/confounds.py +++ b/aslprep/utils/confounds.py @@ -13,7 +13,7 @@ def _less_breakable(a_string): """Harden the string to different environments, whatever that means.""" - return "".join(a_string.split()).strip("#") + return ''.join(a_string.split()).strip('#') def _camel_to_snake(name): @@ -22,8 +22,8 @@ def _camel_to_snake(name): Taken from https://stackoverflow.com/questions/1175208/. If we end up using it more than just here, probably worth pulling in a well-tested package. 
""" - s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() + s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() def _adjust_indices(left_df, right_df): @@ -65,13 +65,13 @@ def _gather_confounds( all_files = [] confounds_list = [] for confound, name in ( - (signals, "Global signals"), - (std_dvars, "Standardized DVARS"), - (dvars, "DVARS"), - (fdisp, "Framewise displacement"), - (rmsd, "RMSD"), - (motion, "Motion parameters"), - (score, "score_outlier_index"), + (signals, 'Global signals'), + (std_dvars, 'Standardized DVARS'), + (dvars, 'DVARS'), + (fdisp, 'Framewise displacement'), + (rmsd, 'RMSD'), + (motion, 'Motion parameters'), + (score, 'score_outlier_index'), ): if confound is not None and isdefined(confound): confounds_list.append(name) @@ -92,8 +92,8 @@ def _gather_confounds( if newpath is None: newpath = os.getcwd() - combined_out = os.path.join(newpath, "confounds.tsv") - confounds_data.to_csv(combined_out, sep="\t", index=False, na_rep="n/a") + combined_out = os.path.join(newpath, 'confounds.tsv') + confounds_data.to_csv(combined_out, sep='\t', index=False, na_rep='n/a') return combined_out, confounds_list diff --git a/aslprep/utils/misc.py b/aslprep/utils/misc.py index 6b060acd1..a49d6997a 100644 --- a/aslprep/utils/misc.py +++ b/aslprep/utils/misc.py @@ -4,7 +4,6 @@ from __future__ import annotations import os -import typing as ty import nibabel as nb @@ -16,7 +15,7 @@ def check_deps(workflow): return sorted( (node.interface.__class__.__name__, node.interface._cmd) for node in workflow._get_all_nodes() - if (hasattr(node.interface, "_cmd") and which(node.interface._cmd.split()[0]) is None) + if (hasattr(node.interface, '_cmd') and which(node.interface._cmd.split()[0]) is None) ) @@ -28,7 +27,7 @@ def get_n_volumes(fname): elif img.ndim == 4: n_volumes = img.shape[3] else: - raise ValueError(f"Image has {img.ndim} dimensions: {fname}") + raise ValueError(f'Image has {img.ndim} dimensions: {fname}') return n_volumes @@ -38,9 +37,9 @@ def _create_mem_gb(asl_fname): asl_size_gb = os.path.getsize(asl_fname) / (1024**3) asl_tlen = nb.load(asl_fname).shape[-1] mem_gb = { - "filesize": asl_size_gb, - "resampled": asl_size_gb * 4, - "largemem": asl_size_gb * (max(asl_tlen / 100, 1.0) + 4), + 'filesize': asl_size_gb, + 'resampled': asl_size_gb * 4, + 'largemem': asl_size_gb * (max(asl_tlen / 100, 1.0) + 4), } return asl_tlen, mem_gb @@ -58,10 +57,10 @@ def _get_wf_name(asl_fname): from nipype.utils.filemanip import split_filename fname = split_filename(asl_fname)[1] - fname_nosub = "_".join(fname.split("_")[1:]) - name = "asl_preproc_" + fname_nosub.replace(".", "_").replace(" ", "").replace( - "-", "_" - ).replace("_asl", "_wf") + fname_nosub = '_'.join(fname.split('_')[1:]) + name = 'asl_preproc_' + fname_nosub.replace('.', '_').replace(' ', '').replace( + '-', '_' + ).replace('_asl', '_wf') return name @@ -73,23 +72,23 @@ def _select_last_in_list(lst): def _prefix(subid): """Add sub- prefix to subject ID, if necessary.""" - return subid if subid.startswith("sub-") else f"sub-{subid}" + return subid if subid.startswith('sub-') else f'sub-{subid}' -def estimate_asl_mem_usage(asl_fname: str) -> ty.Tuple[int, dict]: +def estimate_asl_mem_usage(asl_fname: str) -> tuple[int, dict]: """Estimate ASL memory usage.""" import nibabel as nb import numpy as np img = nb.load(asl_fname) - nvox = int(np.prod(img.shape, dtype="u8")) + nvox = int(np.prod(img.shape, dtype='u8')) # Assume tools 
will coerce to 8-byte floats to be safe asl_size_gb = 8 * nvox / (1024**3) asl_tlen = img.shape[-1] mem_gb = { - "filesize": asl_size_gb, - "resampled": asl_size_gb * 4, - "largemem": asl_size_gb * (max(asl_tlen / 100, 1.0) + 4), + 'filesize': asl_size_gb, + 'resampled': asl_size_gb * 4, + 'largemem': asl_size_gb * (max(asl_tlen / 100, 1.0) + 4), } return asl_tlen, mem_gb diff --git a/aslprep/utils/plotting.py b/aslprep/utils/plotting.py index b24a07a29..cae7a31eb 100644 --- a/aslprep/utils/plotting.py +++ b/aslprep/utils/plotting.py @@ -19,13 +19,13 @@ from svgutils.transform import SVGFigure -class CBFPlot(object): +class CBFPlot: """Generate the CBF Summary Plot. This plot restricts CBF values to -20 (if there are negative values) or 0 (if not) to 100. """ - __slots__ = ["cbf", "ref_vol", "label", "outfile", "vmax"] + __slots__ = ['cbf', 'ref_vol', 'label', 'outfile', 'vmax'] def __init__(self, cbf, ref_vol, label, outfile, vmax): self.cbf = cbf @@ -57,11 +57,11 @@ def plot_stat_map( cbf, ref_vol, plot_params=None, - order=("z", "x", "y"), + order=('z', 'x', 'y'), vmax=100, estimate_brightness=False, label=None, - compress="auto", + compress='auto', ): """Plot statistical map.""" plot_params = {} if plot_params is None else plot_params @@ -85,7 +85,7 @@ def plot_stat_map( display = plotting.plot_stat_map( stat_map_img=image_nii, bg_img=ref_vol, - resampling_interpolation="nearest", + resampling_interpolation='nearest', display_mode=mode, cut_coords=cuts[mode], vmax=vmax, @@ -93,7 +93,7 @@ def plot_stat_map( draw_cross=False, colorbar=True, symmetric_cbar=False, - cmap="coolwarm", + cmap='coolwarm', title=label if i == 0 else None, ) svg = extract_svg(display, compress=compress) @@ -117,14 +117,14 @@ class fMRIPlot: # noqa:N801 """Generates the fMRI Summary Plot.""" __slots__ = ( - "timeseries", - "segments", - "tr", - "confounds", - "spikes", - "nskip", - "sort_carpet", - "paired_carpet", + 'timeseries', + 'segments', + 'tr', + 'confounds', + 'spikes', + 'nskip', + 'sort_carpet', + 'paired_carpet', ) def __init__( @@ -155,14 +155,14 @@ def __init__( vlines = {} self.confounds = {} if confounds is None and conf_file: - confounds = pd.read_csv(conf_file, sep=r"[\t\s]+", usecols=usecols, index_col=False) + confounds = pd.read_csv(conf_file, sep=r'[\t\s]+', usecols=usecols, index_col=False) if confounds is not None: for name in confounds.columns: self.confounds[name] = { - "values": confounds[[name]].values.squeeze().tolist(), - "units": units.get(name), - "cutoff": vlines.get(name), + 'values': confounds[[name]].values.squeeze().tolist(), + 'units': units.get(name), + 'cutoff': vlines.get(name), } self.spikes = [] @@ -175,8 +175,8 @@ def plot(self, figure=None): import seaborn as sns from niworkflows.viz.plots import plot_carpet as plt_carpet - sns.set_style("whitegrid") - sns.set_context("paper", font_scale=0.8) + sns.set_style('whitegrid') + sns.set_context('paper', font_scale=0.8) if figure is None: figure = plt.gcf() @@ -198,10 +198,10 @@ def plot(self, figure=None): if self.confounds: from seaborn import color_palette - palette = color_palette("husl", nconfounds) + palette = color_palette('husl', nconfounds) for i, (name, kwargs) in enumerate(self.confounds.items()): - tseries = kwargs.pop("values") + tseries = kwargs.pop('values') try: confoundplot( tseries, grid[grid_id], tr=self.tr, color=palette[i], name=name, **kwargs @@ -217,7 +217,7 @@ def plot(self, figure=None): tr=self.tr, sort_rows=self.sort_carpet, drop_trs=self.nskip, - cmap="paired" if self.paired_carpet else None, + 
cmap='paired' if self.paired_carpet else None, # This is the only modification we need for ASLPrep detrend=False, ) diff --git a/aslprep/utils/sentry.py b/aslprep/utils/sentry.py index bce017ff2..5f11245a5 100644 --- a/aslprep/utils/sentry.py +++ b/aslprep/utils/sentry.py @@ -12,43 +12,43 @@ CHUNK_SIZE = 16384 # Group common events with pre specified fingerprints KNOWN_ERRORS = { - "permission-denied": ["PermissionError: [Errno 13] Permission denied"], - "memory-error": [ - "MemoryError", - "Cannot allocate memory", - "Return code: 134", + 'permission-denied': ['PermissionError: [Errno 13] Permission denied'], + 'memory-error': [ + 'MemoryError', + 'Cannot allocate memory', + 'Return code: 134', ], - "reconall-already-running": ["ERROR: it appears that recon-all is already running"], - "no-disk-space": ["[Errno 28] No space left on device", "[Errno 122] Disk quota exceeded"], - "segfault": [ - "Segmentation Fault", - "Segfault", - "Return code: 139", + 'reconall-already-running': ['ERROR: it appears that recon-all is already running'], + 'no-disk-space': ['[Errno 28] No space left on device', '[Errno 122] Disk quota exceeded'], + 'segfault': [ + 'Segmentation Fault', + 'Segfault', + 'Return code: 139', ], - "potential-race-condition": [ - "[Errno 39] Directory not empty", - "_unfinished.json", + 'potential-race-condition': [ + '[Errno 39] Directory not empty', + '_unfinished.json', ], - "keyboard-interrupt": [ - "KeyboardInterrupt", + 'keyboard-interrupt': [ + 'KeyboardInterrupt', ], } def sentry_setup(): """Set up sentry.""" - release = config.environment.version or "dev" + release = config.environment.version or 'dev' environment = ( - "dev" + 'dev' if ( - os.getenv("ASLPREP_DEV", "").lower in ("1", "on", "yes", "y", "true") - or ("+" in release) + os.getenv('ASLPREP_DEV', '').lower in ('1', 'on', 'yes', 'y', 'true') + or ('+' in release) ) - else "prod" + else 'prod' ) sentry_sdk.init( - "https://301d1432c6a149eabcff8bbb341fd52d@o317280.ingest.sentry.io/5257228", + 'https://301d1432c6a149eabcff8bbb341fd52d@o317280.ingest.sentry.io/5257228', release=release, environment=environment, before_send=before_send, @@ -62,14 +62,14 @@ def process_crashfile(crashfile): """Parse the contents of a crashfile and submit sentry messages.""" crash_info = read_crashfile(str(crashfile)) with sentry_sdk.push_scope() as scope: - scope.level = "fatal" + scope.level = 'fatal' # Extract node name - node_name = crash_info.pop("node").split(".")[-1] - scope.set_tag("node_name", node_name) + node_name = crash_info.pop('node').split('.')[-1] + scope.set_tag('node_name', node_name) # Massage the traceback, extract the gist - traceback = crash_info.pop("traceback") + traceback = crash_info.pop('traceback') # last line is probably most informative summary gist = traceback.splitlines()[-1] exception_text_start = 1 @@ -78,12 +78,12 @@ def process_crashfile(crashfile): break exception_text_start += 1 - exception_text = "\n".join(traceback.splitlines()[exception_text_start:]) + exception_text = '\n'.join(traceback.splitlines()[exception_text_start:]) # Extract inputs, if present - inputs = crash_info.pop("inputs", None) + inputs = crash_info.pop('inputs', None) if inputs: - scope.set_extra("inputs", dict(inputs)) + scope.set_extra('inputs', dict(inputs)) # Extract any other possible metadata in the crash file for k, v in crash_info.items(): @@ -92,10 +92,10 @@ def process_crashfile(crashfile): scope.set_extra(k, strv[0]) else: for i, chunk in enumerate(strv): - scope.set_extra("%s_%02d" % (k, i), chunk) # noqa:FS001 + 
scope.set_extra('%s_%02d' % (k, i), chunk) # noqa:FS001 - fingerprint = "" - issue_title = f"{node_name}: {gist}" + fingerprint = '' + issue_title = f'{node_name}: {gist}' for new_fingerprint, error_snippets in KNOWN_ERRORS.items(): for error_snippet in error_snippets: if error_snippet in traceback: @@ -105,47 +105,47 @@ def process_crashfile(crashfile): if fingerprint: break - message = issue_title + "\n\n" + message = issue_title + '\n\n' message += exception_text[-(8192 - len(message)) :] if fingerprint: - sentry_sdk.add_breadcrumb(message=fingerprint, level="fatal") + sentry_sdk.add_breadcrumb(message=fingerprint, level='fatal') else: # remove file paths - fingerprint = re.sub(r"(/[^/ ]*)+/?", "", message) + fingerprint = re.sub(r'(/[^/ ]*)+/?', '', message) # remove words containing numbers - fingerprint = re.sub(r"([a-zA-Z]*[0-9]+[a-zA-Z]*)+", "", fingerprint) + fingerprint = re.sub(r'([a-zA-Z]*[0-9]+[a-zA-Z]*)+', '', fingerprint) # adding the return code if it exists for line in message.splitlines(): - if line.startswith("Return code"): + if line.startswith('Return code'): fingerprint += line break scope.fingerprint = [fingerprint] - sentry_sdk.capture_message(message, "fatal") + sentry_sdk.capture_message(message, 'fatal') def before_send(event, hints): # noqa:U100 """Filter log messages about crashed nodes.""" - if "logentry" in event and "message" in event["logentry"]: - msg = event["logentry"]["message"] - if msg.startswith("could not run node:"): + if 'logentry' in event and 'message' in event['logentry']: + msg = event['logentry']['message'] + if msg.startswith('could not run node:'): return None - if msg.startswith("Saving crash info to "): + if msg.startswith('Saving crash info to '): return None - if re.match("Node .+ failed to run on host .+", msg): + if re.match('Node .+ failed to run on host .+', msg): return None - if "breadcrumbs" in event and isinstance(event["breadcrumbs"], list): + if 'breadcrumbs' in event and isinstance(event['breadcrumbs'], list): fingerprints_to_propagate = [ - "no-disk-space", - "memory-error", - "permission-denied", - "keyboard-interrupt", + 'no-disk-space', + 'memory-error', + 'permission-denied', + 'keyboard-interrupt', ] - for bc in event["breadcrumbs"]: - msg = bc.get("message", "empty-msg") + for bc in event['breadcrumbs']: + msg = bc.get('message', 'empty-msg') if msg in fingerprints_to_propagate: - event["fingerprint"] = [msg] + event['fingerprint'] = [msg] break return event diff --git a/aslprep/workflows/__init__.py b/aslprep/workflows/__init__.py index 16a22bf86..2f72a186b 100644 --- a/aslprep/workflows/__init__.py +++ b/aslprep/workflows/__init__.py @@ -3,6 +3,6 @@ from aslprep.workflows import asl, base __all__ = [ - "asl", - "base", + 'asl', + 'base', ] diff --git a/aslprep/workflows/asl/__init__.py b/aslprep/workflows/asl/__init__.py index 507a4553b..363374069 100644 --- a/aslprep/workflows/asl/__init__.py +++ b/aslprep/workflows/asl/__init__.py @@ -15,14 +15,14 @@ ) __all__ = [ - "apply", - "base", - "cbf", - "confounds", - "fit", - "hmc", - "outputs", - "plotting", - "reference", - "resampling", + 'apply', + 'base', + 'cbf', + 'confounds', + 'fit', + 'hmc', + 'outputs', + 'plotting', + 'reference', + 'resampling', ] diff --git a/aslprep/workflows/asl/apply.py b/aslprep/workflows/asl/apply.py index 8fe611118..7414a814f 100644 --- a/aslprep/workflows/asl/apply.py +++ b/aslprep/workflows/asl/apply.py @@ -14,7 +14,7 @@ def init_asl_cifti_resample_wf( mem_gb: dict, fieldmap_id: str | None = None, omp_nthreads: int = 1, - name: str = 
"asl_cifti_resample_wf", + name: str = 'asl_cifti_resample_wf', ) -> pe.Workflow: """Resample an ASL series to a CIFTI target space. @@ -99,34 +99,34 @@ def init_asl_cifti_resample_wf( niu.IdentityInterface( fields=[ # Raw ASL file (asl_minimal) - "asl_file", + 'asl_file', # ASL file in T1w space - "asl_anat", + 'asl_anat', # Other inputs - "mni6_mask", - "aslref2fmap_xfm", - "aslref2anat_xfm", - "anat2mni6_xfm", - "fmap_ref", - "fmap_coeff", - "fmap_id", - "motion_xfm", - "coreg_aslref", - "white", - "pial", - "midthickness", - "midthickness_fsLR", - "sphere_reg_fsLR", - "cortex_mask", - "anat_ribbon", + 'mni6_mask', + 'aslref2fmap_xfm', + 'aslref2anat_xfm', + 'anat2mni6_xfm', + 'fmap_ref', + 'fmap_coeff', + 'fmap_id', + 'motion_xfm', + 'coreg_aslref', + 'white', + 'pial', + 'midthickness', + 'midthickness_fsLR', + 'sphere_reg_fsLR', + 'cortex_mask', + 'anat_ribbon', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( - niu.IdentityInterface(fields=["asl_cifti", "cifti_metadata", "goodvoxels_mask"]), - name="outputnode", + niu.IdentityInterface(fields=['asl_cifti', 'cifti_metadata', 'goodvoxels_mask']), + name='outputnode', ) asl_MNI6_wf = init_bold_volumetric_resample_wf( @@ -134,30 +134,30 @@ def init_asl_cifti_resample_wf( fieldmap_id=fieldmap_id, omp_nthreads=omp_nthreads, mem_gb=mem_gb, - jacobian="fmap-jacobian" not in config.workflow.ignore, - name="asl_MNI6_wf", + jacobian='fmap-jacobian' not in config.workflow.ignore, + name='asl_MNI6_wf', ) asl_fsLR_resampling_wf = init_bold_fsLR_resampling_wf( grayord_density=config.workflow.cifti_output, omp_nthreads=omp_nthreads, - mem_gb=mem_gb["resampled"], - name="asl_fsLR_resampling_wf", + mem_gb=mem_gb['resampled'], + name='asl_fsLR_resampling_wf', ) if config.workflow.project_goodvoxels: - goodvoxels_bold_mask_wf = init_goodvoxels_bold_mask_wf(mem_gb["resampled"]) + goodvoxels_bold_mask_wf = init_goodvoxels_bold_mask_wf(mem_gb['resampled']) workflow.connect([ (inputnode, goodvoxels_bold_mask_wf, [ - ("asl_anat", "inputnode.bold_file"), - ("anat_ribbon", "inputnode.anat_ribbon"), + ('asl_anat', 'inputnode.bold_file'), + ('anat_ribbon', 'inputnode.anat_ribbon'), ]), (goodvoxels_bold_mask_wf, asl_fsLR_resampling_wf, [ - ("outputnode.goodvoxels_mask", "inputnode.volume_roi"), + ('outputnode.goodvoxels_mask', 'inputnode.volume_roi'), ]), (goodvoxels_bold_mask_wf, outputnode, [ - ("outputnode.goodvoxels_mask", "goodvoxels_mask"), + ('outputnode.goodvoxels_mask', 'goodvoxels_mask'), ]), ]) # fmt:skip @@ -168,43 +168,43 @@ def init_asl_cifti_resample_wf( asl_grayords_wf = init_bold_grayords_wf( grayord_density=config.workflow.cifti_output, - mem_gb=mem_gb["resampled"], - repetition_time=metadata["RepetitionTime"], - name="asl_grayords_wf", + mem_gb=mem_gb['resampled'], + repetition_time=metadata['RepetitionTime'], + name='asl_grayords_wf', ) workflow.connect([ # Resample BOLD to MNI152NLin6Asym, may duplicate asl_std_wf above (inputnode, asl_MNI6_wf, [ - ("mni6_mask", "inputnode.target_ref_file"), - ("mni6_mask", "inputnode.target_mask"), - ("anat2mni6_xfm", "inputnode.anat2std_xfm"), - ("fmap_ref", "inputnode.fmap_ref"), - ("fmap_coeff", "inputnode.fmap_coeff"), - ("fmap_id", "inputnode.fmap_id"), - ("asl_file", "inputnode.bold_file"), - ("motion_xfm", "inputnode.motion_xfm"), - ("coreg_aslref", "inputnode.bold_ref_file"), - ("aslref2fmap_xfm", "inputnode.boldref2fmap_xfm"), - ("aslref2anat_xfm", "inputnode.boldref2anat_xfm"), + ('mni6_mask', 'inputnode.target_ref_file'), + ('mni6_mask', 'inputnode.target_mask'), + 
('anat2mni6_xfm', 'inputnode.anat2std_xfm'), + ('fmap_ref', 'inputnode.fmap_ref'), + ('fmap_coeff', 'inputnode.fmap_coeff'), + ('fmap_id', 'inputnode.fmap_id'), + ('asl_file', 'inputnode.bold_file'), + ('motion_xfm', 'inputnode.motion_xfm'), + ('coreg_aslref', 'inputnode.bold_ref_file'), + ('aslref2fmap_xfm', 'inputnode.boldref2fmap_xfm'), + ('aslref2anat_xfm', 'inputnode.boldref2anat_xfm'), ]), # Resample T1w-space BOLD to fsLR surfaces (inputnode, asl_fsLR_resampling_wf, [ - ("asl_anat", "inputnode.bold_file"), - ("white", "inputnode.white"), - ("pial", "inputnode.pial"), - ("midthickness", "inputnode.midthickness"), - ("midthickness_fsLR", "inputnode.midthickness_fsLR"), - ("sphere_reg_fsLR", "inputnode.sphere_reg_fsLR"), - ("cortex_mask", "inputnode.cortex_mask"), + ('asl_anat', 'inputnode.bold_file'), + ('white', 'inputnode.white'), + ('pial', 'inputnode.pial'), + ('midthickness', 'inputnode.midthickness'), + ('midthickness_fsLR', 'inputnode.midthickness_fsLR'), + ('sphere_reg_fsLR', 'inputnode.sphere_reg_fsLR'), + ('cortex_mask', 'inputnode.cortex_mask'), ]), - (asl_MNI6_wf, asl_grayords_wf, [("outputnode.bold_file", "inputnode.bold_std")]), + (asl_MNI6_wf, asl_grayords_wf, [('outputnode.bold_file', 'inputnode.bold_std')]), (asl_fsLR_resampling_wf, asl_grayords_wf, [ - ("outputnode.bold_fsLR", "inputnode.bold_fsLR"), + ('outputnode.bold_fsLR', 'inputnode.bold_fsLR'), ]), (asl_grayords_wf, outputnode, [ - ("outputnode.cifti_bold", "asl_cifti"), - ("outputnode.cifti_metadata", "cifti_metadata"), + ('outputnode.cifti_bold', 'asl_cifti'), + ('outputnode.cifti_metadata', 'cifti_metadata'), ]), ]) # fmt:skip diff --git a/aslprep/workflows/asl/base.py b/aslprep/workflows/asl/base.py index 432a3a9d7..0b96fcc49 100644 --- a/aslprep/workflows/asl/base.py +++ b/aslprep/workflows/asl/base.py @@ -1,7 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Preprocessing workflows for ASL data.""" -import typing as ty import numpy as np from fmriprep.workflows.bold.apply import init_bold_volumetric_resample_wf @@ -33,7 +32,7 @@ def init_asl_wf( *, asl_file: str, precomputed: dict = {}, - fieldmap_id: ty.Optional[str] = None, + fieldmap_id: str | None = None, ): """Perform the functional preprocessing stages of ASLPrep. @@ -183,10 +182,10 @@ def init_asl_wf( n_vols = get_n_volumes(asl_file) use_ge = config.workflow.use_ge if isinstance(config.workflow.use_ge, bool) else n_vols <= 5 if use_ge: - config.loggers.workflow.warning("Using GE-specific processing. HMC will be disabled.") + config.loggers.workflow.warning('Using GE-specific processing. HMC will be disabled.') if scorescrub: config.loggers.workflow.warning( - "SCORE/SCRUB processing is disabled for GE-specific processing" + 'SCORE/SCRUB processing is disabled for GE-specific processing' ) scorescrub = False @@ -196,60 +195,60 @@ def init_asl_wf( config.loggers.workflow.debug( ( 'Creating asl processing workflow for "%s" (%.2f GB / %d TRs). ' - "Memory resampled/largemem=%.2f/%.2f GB." + 'Memory resampled/largemem=%.2f/%.2f GB.' ), asl_file, - mem_gb["filesize"], + mem_gb['filesize'], asl_tlen, - mem_gb["resampled"], - mem_gb["largemem"], + mem_gb['resampled'], + mem_gb['largemem'], ) # Collect associated files run_data = collect_run_data(layout, asl_file) - metadata = run_data["asl_metadata"].copy() + metadata = run_data['asl_metadata'].copy() # Patch RepetitionTimePreparation into RepetitionTime, # for the sake of BOLD-based interfaces and workflows. 
# This value shouldn't be used for anything except figures and reportlets. - metadata["RepetitionTime"] = metadata.get( - "RepetitionTime", - np.mean(metadata["RepetitionTimePreparation"]), + metadata['RepetitionTime'] = metadata.get( + 'RepetitionTime', + np.mean(metadata['RepetitionTimePreparation']), ) is_multi_pld = determine_multi_pld(metadata=metadata) if scorescrub and is_multi_pld: config.loggers.workflow.warning( - f"SCORE/SCRUB processing will be disabled for multi-delay {asl_file}" + f'SCORE/SCRUB processing will be disabled for multi-delay {asl_file}' ) scorescrub = False # Determine which volumes to use in the pipeline - processing_target = select_processing_target(aslcontext=run_data["aslcontext"]) + processing_target = select_processing_target(aslcontext=run_data['aslcontext']) # Determine which CBF outputs to expect att_derivs = [] - cbf_3d_derivs = ["mean_cbf"] + cbf_3d_derivs = ['mean_cbf'] cbf_4d_derivs = [] if is_multi_pld: - att_derivs += ["att"] + att_derivs += ['att'] else: - cbf_4d_derivs += ["cbf_ts"] + cbf_4d_derivs += ['cbf_ts'] if scorescrub: - cbf_4d_derivs += ["cbf_ts_score"] + cbf_4d_derivs += ['cbf_ts_score'] cbf_3d_derivs += [ - "mean_cbf_score", - "mean_cbf_scrub", + 'mean_cbf_score', + 'mean_cbf_scrub', ] if basil: cbf_3d_derivs += [ - "mean_cbf_basil", - "mean_cbf_gm_basil", - "mean_cbf_wm_basil", + 'mean_cbf_basil', + 'mean_cbf_gm_basil', + 'mean_cbf_wm_basil', ] - att_derivs += ["att_basil"] + att_derivs += ['att_basil'] cbf_derivs = att_derivs + cbf_3d_derivs + cbf_4d_derivs @@ -266,61 +265,61 @@ def init_asl_wf( niu.IdentityInterface( fields=[ # ASL-specific elements - "asl_file", - "asl_metadata", - "aslcontext", - "m0scan", - "m0scan_metadata", + 'asl_file', + 'asl_metadata', + 'aslcontext', + 'm0scan', + 'm0scan_metadata', # Anatomical coregistration - "t1w_preproc", - "t1w_mask", - "t1w_dseg", - "t1w_tpms", + 't1w_preproc', + 't1w_mask', + 't1w_dseg', + 't1w_tpms', # FreeSurfer outputs - "subjects_dir", - "subject_id", - "fsnative2t1w_xfm", - "white", - "midthickness", - "pial", - "sphere_reg_fsLR", - "midthickness_fsLR", - "cortex_mask", - "anat_ribbon", + 'subjects_dir', + 'subject_id', + 'fsnative2t1w_xfm', + 'white', + 'midthickness', + 'pial', + 'sphere_reg_fsLR', + 'midthickness_fsLR', + 'cortex_mask', + 'anat_ribbon', # Fieldmap registration - "fmap", - "fmap_ref", - "fmap_coeff", - "fmap_mask", - "fmap_id", - "sdc_method", + 'fmap', + 'fmap_ref', + 'fmap_coeff', + 'fmap_mask', + 'fmap_id', + 'sdc_method', # Volumetric templates - "anat2std_xfm", - "std_t1w", - "std_mask", - "std_space", - "std_resolution", - "std_cohort", + 'anat2std_xfm', + 'std_t1w', + 'std_mask', + 'std_space', + 'std_resolution', + 'std_cohort', # MNI152NLin6Asym warp, for CIFTI use - "anat2mni6_xfm", - "mni6_mask", + 'anat2mni6_xfm', + 'mni6_mask', # MNI152NLin2009cAsym inverse warp, for carpetplotting - "mni2009c2anat_xfm", + 'mni2009c2anat_xfm', # MNI152NLin2009cAsym forward warp, for CBF QC - "anat2mni2009c_xfm", + 'anat2mni2009c_xfm', ], ), - name="inputnode", + name='inputnode', ) inputnode.inputs.asl_file = asl_file inputnode.inputs.asl_metadata = metadata - inputnode.inputs.aslcontext = run_data["aslcontext"] - inputnode.inputs.m0scan_metadata = run_data["m0scan_metadata"] + inputnode.inputs.aslcontext = run_data['aslcontext'] + inputnode.inputs.m0scan_metadata = run_data['m0scan_metadata'] # Perform minimal preprocessing of the ASL data, including HMC and SDC asl_fit_wf = init_asl_fit_wf( asl_file=asl_file, - m0scan=run_data["m0scan"], + 
m0scan=run_data['m0scan'], use_ge=use_ge, precomputed=precomputed, fieldmap_id=fieldmap_id, @@ -329,20 +328,20 @@ def init_asl_wf( workflow.connect([ (inputnode, asl_fit_wf, [ - ("aslcontext", "inputnode.aslcontext"), + ('aslcontext', 'inputnode.aslcontext'), # Original inputs from fMRIPrep - ("t1w_preproc", "inputnode.t1w_preproc"), - ("t1w_mask", "inputnode.t1w_mask"), - ("t1w_dseg", "inputnode.t1w_dseg"), - ("subjects_dir", "inputnode.subjects_dir"), - ("subject_id", "inputnode.subject_id"), - ("fsnative2t1w_xfm", "inputnode.fsnative2t1w_xfm"), - ("fmap", "inputnode.fmap"), - ("fmap_ref", "inputnode.fmap_ref"), - ("fmap_coeff", "inputnode.fmap_coeff"), - ("fmap_mask", "inputnode.fmap_mask"), - ("fmap_id", "inputnode.fmap_id"), - ("sdc_method", "inputnode.sdc_method"), + ('t1w_preproc', 'inputnode.t1w_preproc'), + ('t1w_mask', 'inputnode.t1w_mask'), + ('t1w_dseg', 'inputnode.t1w_dseg'), + ('subjects_dir', 'inputnode.subjects_dir'), + ('subject_id', 'inputnode.subject_id'), + ('fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm'), + ('fmap', 'inputnode.fmap'), + ('fmap_ref', 'inputnode.fmap_ref'), + ('fmap_coeff', 'inputnode.fmap_coeff'), + ('fmap_mask', 'inputnode.fmap_mask'), + ('fmap_id', 'inputnode.fmap_id'), + ('sdc_method', 'inputnode.sdc_method'), ]), ]) # fmt:skip @@ -351,25 +350,25 @@ def init_asl_wf( # because ASLPrep's main output is CBF and we need aslref-space data to calculate CBF. asl_native_wf = init_asl_native_wf( asl_file=asl_file, - m0scan=run_data["m0scan"], + m0scan=run_data['m0scan'], fieldmap_id=fieldmap_id, omp_nthreads=omp_nthreads, - name="asl_native_wf", + name='asl_native_wf', ) workflow.connect([ (inputnode, asl_native_wf, [ - ("aslcontext", "inputnode.aslcontext"), - ("fmap_ref", "inputnode.fmap_ref"), - ("fmap_coeff", "inputnode.fmap_coeff"), - ("fmap_id", "inputnode.fmap_id"), + ('aslcontext', 'inputnode.aslcontext'), + ('fmap_ref', 'inputnode.fmap_ref'), + ('fmap_coeff', 'inputnode.fmap_coeff'), + ('fmap_id', 'inputnode.fmap_id'), ]), (asl_fit_wf, asl_native_wf, [ - ("outputnode.coreg_aslref", "inputnode.aslref"), - ("outputnode.asl_mask", "inputnode.asl_mask"), - ("outputnode.motion_xfm", "inputnode.motion_xfm"), - ("outputnode.aslref2fmap_xfm", "inputnode.aslref2fmap_xfm"), - ("outputnode.dummy_scans", "inputnode.dummy_scans"), + ('outputnode.coreg_aslref', 'inputnode.aslref'), + ('outputnode.asl_mask', 'inputnode.asl_mask'), + ('outputnode.motion_xfm', 'inputnode.motion_xfm'), + ('outputnode.aslref2fmap_xfm', 'inputnode.aslref2fmap_xfm'), + ('outputnode.dummy_scans', 'inputnode.dummy_scans'), ]), ]) # fmt:skip @@ -384,27 +383,27 @@ def init_asl_wf( basil=basil, smooth_kernel=smooth_kernel, metadata=metadata, - name="cbf_wf", + name='cbf_wf', ) workflow.connect([ (inputnode, cbf_wf, [ - ("t1w_mask", "inputnode.t1w_mask"), - ("t1w_tpms", "inputnode.t1w_tpms"), - ("m0scan_metadata", "inputnode.m0scan_metadata"), + ('t1w_mask', 'inputnode.t1w_mask'), + ('t1w_tpms', 'inputnode.t1w_tpms'), + ('m0scan_metadata', 'inputnode.m0scan_metadata'), ]), (asl_fit_wf, cbf_wf, [ - ("outputnode.asl_mask", "inputnode.asl_mask"), - ("outputnode.aslref2anat_xfm", "inputnode.aslref2anat_xfm"), + ('outputnode.asl_mask', 'inputnode.asl_mask'), + ('outputnode.aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), ]), # Use post-HMC+SDC ASL as basis for CBF calculation (asl_native_wf, cbf_wf, [ - ("outputnode.asl_native", "inputnode.asl_file"), - ("outputnode.aslcontext", "inputnode.aslcontext"), - ("outputnode.m0scan_native", "inputnode.m0scan"), + ('outputnode.asl_native', 
'inputnode.asl_file'), + ('outputnode.aslcontext', 'inputnode.aslcontext'), + ('outputnode.m0scan_native', 'inputnode.m0scan'), ]), ]) # fmt:skip - if config.workflow.level == "minimal": + if config.workflow.level == 'minimal': return workflow # @@ -417,19 +416,19 @@ def init_asl_wf( asl_confounds_wf = init_asl_confounds_wf( n_volumes=n_vols, - mem_gb=mem_gb["largemem"], + mem_gb=mem_gb['largemem'], freesurfer=config.workflow.run_reconall, - name="asl_confounds_wf", + name='asl_confounds_wf', ) ds_confounds = pe.Node( DerivativesDataSink( base_directory=config.execution.aslprep_dir, - desc="confounds", - suffix="timeseries", - dismiss_entities=("echo",), + desc='confounds', + suffix='timeseries', + dismiss_entities=('echo',), ), - name="ds_confounds", + name='ds_confounds', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) @@ -437,20 +436,20 @@ def init_asl_wf( workflow.connect([ (inputnode, asl_confounds_wf, [ - ("t1w_tpms", "inputnode.t1w_tpms"), - ("t1w_mask", "inputnode.t1w_mask"), + ('t1w_tpms', 'inputnode.t1w_tpms'), + ('t1w_mask', 'inputnode.t1w_mask'), ]), (asl_fit_wf, asl_confounds_wf, [ - ("outputnode.asl_mask", "inputnode.asl_mask"), - ("outputnode.movpar_file", "inputnode.movpar_file"), - ("outputnode.rmsd_file", "inputnode.rmsd_file"), - ("outputnode.aslref2anat_xfm", "inputnode.aslref2anat_xfm"), - ("outputnode.dummy_scans", "inputnode.skip_vols"), + ('outputnode.asl_mask', 'inputnode.asl_mask'), + ('outputnode.movpar_file', 'inputnode.movpar_file'), + ('outputnode.rmsd_file', 'inputnode.rmsd_file'), + ('outputnode.aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), + ('outputnode.dummy_scans', 'inputnode.skip_vols'), ]), - (asl_native_wf, asl_confounds_wf, [("outputnode.asl_native", "inputnode.asl")]), + (asl_native_wf, asl_confounds_wf, [('outputnode.asl_native', 'inputnode.asl')]), (asl_confounds_wf, ds_confounds, [ - ("outputnode.confounds_file", "in_file"), - ("outputnode.confounds_metadata", "meta_dict"), + ('outputnode.confounds_file', 'in_file'), + ('outputnode.confounds_metadata', 'meta_dict'), ]), ]) # fmt:skip @@ -458,70 +457,70 @@ def init_asl_wf( cbf_confounds_wf = init_cbf_confounds_wf( scorescrub=scorescrub, basil=basil, - name="cbf_confounds_wf", + name='cbf_confounds_wf', ) workflow.connect([ (inputnode, cbf_confounds_wf, [ - ("asl_file", "inputnode.name_source"), - ("t1w_tpms", "inputnode.t1w_tpms"), - ("t1w_mask", "inputnode.t1w_mask"), - ("anat2mni2009c_xfm", "inputnode.anat2mni2009c_xfm"), + ('asl_file', 'inputnode.name_source'), + ('t1w_tpms', 'inputnode.t1w_tpms'), + ('t1w_mask', 'inputnode.t1w_mask'), + ('anat2mni2009c_xfm', 'inputnode.anat2mni2009c_xfm'), ]), (asl_fit_wf, cbf_confounds_wf, [ - ("outputnode.asl_mask", "inputnode.asl_mask"), - ("outputnode.aslref2anat_xfm", "inputnode.aslref2anat_xfm"), - ("outputnode.rmsd_file", "inputnode.rmsd_file"), + ('outputnode.asl_mask', 'inputnode.asl_mask'), + ('outputnode.aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), + ('outputnode.rmsd_file', 'inputnode.rmsd_file'), ]), (asl_confounds_wf, cbf_confounds_wf, [ - ("outputnode.confounds_file", "inputnode.confounds_file"), + ('outputnode.confounds_file', 'inputnode.confounds_file'), ]), ]) # fmt:skip for cbf_deriv in cbf_3d_derivs: workflow.connect([ - (cbf_wf, cbf_confounds_wf, [(f"outputnode.{cbf_deriv}", f"inputnode.{cbf_deriv}")]), + (cbf_wf, cbf_confounds_wf, [(f'outputnode.{cbf_deriv}', f'inputnode.{cbf_deriv}')]), ]) # fmt:skip # Plot CBF outputs. # NOTE: CIFTI input won't be provided unless level is set to 'full'. 
cbf_reporting_wf = init_cbf_reporting_wf( metadata=metadata, - plot_timeseries=not (is_multi_pld or use_ge or (config.workflow.level == "resampling")), + plot_timeseries=not (is_multi_pld or use_ge or (config.workflow.level == 'resampling')), scorescrub=scorescrub, basil=basil, - name="cbf_reporting_wf", + name='cbf_reporting_wf', ) workflow.connect([ (inputnode, cbf_reporting_wf, [ - ("t1w_dseg", "inputnode.t1w_dseg"), - ("mni2009c2anat_xfm", "inputnode.std2anat_xfm"), + ('t1w_dseg', 'inputnode.t1w_dseg'), + ('mni2009c2anat_xfm', 'inputnode.std2anat_xfm'), ]), (asl_confounds_wf, cbf_reporting_wf, [ - ("outputnode.confounds_file", "inputnode.confounds_file"), + ('outputnode.confounds_file', 'inputnode.confounds_file'), ]), (cbf_wf, cbf_reporting_wf, [ - ("outputnode.score_outlier_index", "inputnode.score_outlier_index"), + ('outputnode.score_outlier_index', 'inputnode.score_outlier_index'), ]), - (cbf_confounds_wf, cbf_reporting_wf, [("outputnode.qc_file", "inputnode.qc_file")]), + (cbf_confounds_wf, cbf_reporting_wf, [('outputnode.qc_file', 'inputnode.qc_file')]), (asl_fit_wf, cbf_reporting_wf, [ - ("outputnode.coreg_aslref", "inputnode.aslref"), - ("outputnode.aslref2anat_xfm", "inputnode.aslref2anat_xfm"), + ('outputnode.coreg_aslref', 'inputnode.aslref'), + ('outputnode.aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), # XXX: Used to use the one from refine_mask/reduce_mask - ("outputnode.asl_mask", "inputnode.asl_mask"), + ('outputnode.asl_mask', 'inputnode.asl_mask'), ]), (asl_confounds_wf, cbf_reporting_wf, [ - ("outputnode.crown_mask", "inputnode.crown_mask"), - ("outputnode.acompcor_masks", "inputnode.acompcor_masks"), + ('outputnode.crown_mask', 'inputnode.crown_mask'), + ('outputnode.acompcor_masks', 'inputnode.acompcor_masks'), ]), ]) # fmt:skip for cbf_deriv in cbf_derivs: workflow.connect([ - (cbf_wf, cbf_reporting_wf, [(f"outputnode.{cbf_deriv}", f"inputnode.{cbf_deriv}")]), + (cbf_wf, cbf_reporting_wf, [(f'outputnode.{cbf_deriv}', f'inputnode.{cbf_deriv}')]), ]) # fmt:skip # If we want aslref-space outputs, then call the appropriate workflow - aslref_out = bool(nonstd_spaces.intersection(("func", "run", "asl", "aslref", "sbref"))) - aslref_out &= config.workflow.level == "full" + aslref_out = bool(nonstd_spaces.intersection(('func', 'run', 'asl', 'aslref', 'sbref'))) + aslref_out &= config.workflow.level == 'full' if aslref_out: ds_asl_native_wf = init_ds_asl_native_wf( @@ -536,21 +535,21 @@ def init_asl_wf( ds_asl_native_wf.inputs.inputnode.source_files = [asl_file] workflow.connect([ - (asl_fit_wf, ds_asl_native_wf, [("outputnode.asl_mask", "inputnode.asl_mask")]), - (asl_native_wf, ds_asl_native_wf, [("outputnode.asl_native", "inputnode.asl")]), + (asl_fit_wf, ds_asl_native_wf, [('outputnode.asl_mask', 'inputnode.asl_mask')]), + (asl_native_wf, ds_asl_native_wf, [('outputnode.asl_native', 'inputnode.asl')]), ]) # fmt:skip for cbf_deriv in cbf_derivs: workflow.connect([ (cbf_wf, ds_asl_native_wf, [ - (f"outputnode.{cbf_deriv}", f"inputnode.{cbf_deriv}"), + (f'outputnode.{cbf_deriv}', f'inputnode.{cbf_deriv}'), ]), ]) # fmt:skip - if config.workflow.level == "resampling": + if config.workflow.level == 'resampling': # Fill in datasinks of reportlets seen so far for node in workflow.list_node_names(): - if node.split(".")[-1].startswith("ds_report"): + if node.split('.')[-1].startswith('ds_report'): workflow.get_node(node).inputs.base_directory = config.execution.aslprep_dir workflow.get_node(node).inputs.source_file = asl_file @@ -563,32 +562,32 @@ def init_asl_wf( 
fieldmap_id=fieldmap_id, omp_nthreads=omp_nthreads, mem_gb=mem_gb, - jacobian="fmap-jacobian" not in config.workflow.ignore, - name="asl_anat_wf", + jacobian='fmap-jacobian' not in config.workflow.ignore, + name='asl_anat_wf', ) - asl_anat_wf.inputs.inputnode.resolution = "native" + asl_anat_wf.inputs.inputnode.resolution = 'native' workflow.connect([ (inputnode, asl_anat_wf, [ - ("t1w_preproc", "inputnode.target_ref_file"), - ("t1w_mask", "inputnode.target_mask"), - ("fmap_ref", "inputnode.fmap_ref"), - ("fmap_coeff", "inputnode.fmap_coeff"), - ("fmap_id", "inputnode.fmap_id"), + ('t1w_preproc', 'inputnode.target_ref_file'), + ('t1w_mask', 'inputnode.target_mask'), + ('fmap_ref', 'inputnode.fmap_ref'), + ('fmap_coeff', 'inputnode.fmap_coeff'), + ('fmap_id', 'inputnode.fmap_id'), ]), (asl_fit_wf, asl_anat_wf, [ - ("outputnode.coreg_aslref", "inputnode.bold_ref_file"), - ("outputnode.aslref2fmap_xfm", "inputnode.boldref2fmap_xfm"), - ("outputnode.aslref2anat_xfm", "inputnode.boldref2anat_xfm"), + ('outputnode.coreg_aslref', 'inputnode.bold_ref_file'), + ('outputnode.aslref2fmap_xfm', 'inputnode.boldref2fmap_xfm'), + ('outputnode.aslref2anat_xfm', 'inputnode.boldref2anat_xfm'), ]), (asl_native_wf, asl_anat_wf, [ - ("outputnode.asl_minimal", "inputnode.bold_file"), - ("outputnode.motion_xfm", "inputnode.motion_xfm"), + ('outputnode.asl_minimal', 'inputnode.bold_file'), + ('outputnode.motion_xfm', 'inputnode.motion_xfm'), ]), ]) # fmt:skip # Write out anatomical-space derivatives. - if nonstd_spaces.intersection(("anat", "T1w")): + if nonstd_spaces.intersection(('anat', 'T1w')): ds_asl_t1_wf = init_ds_volumes_wf( bids_root=str(config.execution.bids_dir), output_dir=config.execution.aslprep_dir, @@ -596,24 +595,24 @@ def init_asl_wf( cbf_3d=cbf_3d_derivs, cbf_4d=cbf_4d_derivs, att=att_derivs, - name="ds_asl_t1_wf", + name='ds_asl_t1_wf', ) ds_asl_t1_wf.inputs.inputnode.source_files = [asl_file] - ds_asl_t1_wf.inputs.inputnode.space = "T1w" + ds_asl_t1_wf.inputs.inputnode.space = 'T1w' workflow.connect([ - (inputnode, ds_asl_t1_wf, [("t1w_preproc", "inputnode.ref_file")]), + (inputnode, ds_asl_t1_wf, [('t1w_preproc', 'inputnode.ref_file')]), (asl_fit_wf, ds_asl_t1_wf, [ - ("outputnode.asl_mask", "inputnode.asl_mask"), - ("outputnode.coreg_aslref", "inputnode.aslref"), - ("outputnode.aslref2anat_xfm", "inputnode.aslref2anat_xfm"), + ('outputnode.asl_mask', 'inputnode.asl_mask'), + ('outputnode.coreg_aslref', 'inputnode.aslref'), + ('outputnode.aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), ]), - (asl_anat_wf, ds_asl_t1_wf, [("outputnode.bold_file", "inputnode.asl")]), + (asl_anat_wf, ds_asl_t1_wf, [('outputnode.bold_file', 'inputnode.asl')]), ]) # fmt:skip for cbf_deriv in cbf_derivs: workflow.connect([ - (cbf_wf, ds_asl_t1_wf, [(f"outputnode.{cbf_deriv}", f"inputnode.{cbf_deriv}")]), + (cbf_wf, ds_asl_t1_wf, [(f'outputnode.{cbf_deriv}', f'inputnode.{cbf_deriv}')]), ]) # fmt:skip # Resample derivatives to standard space and write them out. 
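The connection lists reformatted throughout these hunks all follow the same nipype idiom: each `(source, destination, [('output_field', 'input_field'), ...])` tuple wires one node's output into another node's input, and the per-derivative loops build both field names with an f-string. The sketch below is illustrative only; the nodes and field names are invented for the example and are not taken from ASLPrep:

from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe

# Hypothetical nodes and fields, shown only to illustrate the connect() idiom.
wf = pe.Workflow(name='example_wf')
src = pe.Node(niu.IdentityInterface(fields=['out_mean_cbf', 'out_att']), name='src')
dst = pe.Node(niu.IdentityInterface(fields=['in_mean_cbf', 'in_att']), name='dst')

# Each tuple maps an upstream output field onto a downstream input field;
# the loop mirrors the f-string pattern used for the CBF derivatives above.
for deriv in ('mean_cbf', 'att'):
    wf.connect([(src, dst, [(f'out_{deriv}', f'in_{deriv}')])])

The quote changes in this patch touch only the string literals inside those tuples; the wiring itself is unchanged.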
@@ -627,8 +626,8 @@ def init_asl_wf( fieldmap_id=fieldmap_id, omp_nthreads=omp_nthreads, mem_gb=mem_gb, - jacobian="fmap-jacobian" not in config.workflow.ignore, - name="asl_std_wf", + jacobian='fmap-jacobian' not in config.workflow.ignore, + name='asl_std_wf', ) ds_asl_std_wf = init_ds_volumes_wf( bids_root=str(config.execution.bids_dir), @@ -637,49 +636,49 @@ def init_asl_wf( cbf_3d=cbf_3d_derivs, cbf_4d=cbf_4d_derivs, att=att_derivs, - name="ds_asl_std_wf", + name='ds_asl_std_wf', ) ds_asl_std_wf.inputs.inputnode.source_files = [asl_file] workflow.connect([ (inputnode, asl_std_wf, [ - ("std_t1w", "inputnode.target_ref_file"), - ("std_mask", "inputnode.target_mask"), - ("anat2std_xfm", "inputnode.anat2std_xfm"), - ("std_resolution", "inputnode.resolution"), - ("fmap_ref", "inputnode.fmap_ref"), - ("fmap_coeff", "inputnode.fmap_coeff"), - ("fmap_id", "inputnode.fmap_id"), + ('std_t1w', 'inputnode.target_ref_file'), + ('std_mask', 'inputnode.target_mask'), + ('anat2std_xfm', 'inputnode.anat2std_xfm'), + ('std_resolution', 'inputnode.resolution'), + ('fmap_ref', 'inputnode.fmap_ref'), + ('fmap_coeff', 'inputnode.fmap_coeff'), + ('fmap_id', 'inputnode.fmap_id'), ]), (asl_fit_wf, asl_std_wf, [ - ("outputnode.coreg_aslref", "inputnode.bold_ref_file"), - ("outputnode.aslref2fmap_xfm", "inputnode.boldref2fmap_xfm"), - ("outputnode.aslref2anat_xfm", "inputnode.boldref2anat_xfm"), + ('outputnode.coreg_aslref', 'inputnode.bold_ref_file'), + ('outputnode.aslref2fmap_xfm', 'inputnode.boldref2fmap_xfm'), + ('outputnode.aslref2anat_xfm', 'inputnode.boldref2anat_xfm'), ]), (asl_native_wf, asl_std_wf, [ - ("outputnode.asl_minimal", "inputnode.bold_file"), - ("outputnode.motion_xfm", "inputnode.motion_xfm"), + ('outputnode.asl_minimal', 'inputnode.bold_file'), + ('outputnode.motion_xfm', 'inputnode.motion_xfm'), ]), (inputnode, ds_asl_std_wf, [ - ("anat2std_xfm", "inputnode.anat2std_xfm"), - ("std_space", "inputnode.space"), - ("std_resolution", "inputnode.resolution"), - ("std_cohort", "inputnode.cohort"), + ('anat2std_xfm', 'inputnode.anat2std_xfm'), + ('std_space', 'inputnode.space'), + ('std_resolution', 'inputnode.resolution'), + ('std_cohort', 'inputnode.cohort'), ]), (asl_fit_wf, ds_asl_std_wf, [ - ("outputnode.asl_mask", "inputnode.asl_mask"), - ("outputnode.coreg_aslref", "inputnode.aslref"), - ("outputnode.aslref2anat_xfm", "inputnode.aslref2anat_xfm"), + ('outputnode.asl_mask', 'inputnode.asl_mask'), + ('outputnode.coreg_aslref', 'inputnode.aslref'), + ('outputnode.aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), ]), (asl_std_wf, ds_asl_std_wf, [ - ("outputnode.bold_file", "inputnode.asl"), - ("outputnode.resampling_reference", "inputnode.ref_file"), + ('outputnode.bold_file', 'inputnode.asl'), + ('outputnode.resampling_reference', 'inputnode.ref_file'), ]), ]) # fmt:skip for cbf_deriv in cbf_derivs: workflow.connect([ - (cbf_wf, ds_asl_std_wf, [(f"outputnode.{cbf_deriv}", f"inputnode.{cbf_deriv}")]), + (cbf_wf, ds_asl_std_wf, [(f'outputnode.{cbf_deriv}', f'inputnode.{cbf_deriv}')]), ]) # fmt:skip # GIFTI outputs @@ -689,13 +688,13 @@ def init_asl_wf( workflow.__postdesc__ += """\ Non-gridded (surface) resamplings were performed using `mri_vol2surf` (FreeSurfer). """ - config.loggers.workflow.debug("Creating ASL surface-sampling workflow.") + config.loggers.workflow.debug('Creating ASL surface-sampling workflow.') # init_bold_surf_wf uses prepare_timing_parameters, which uses the config object. 
# The uninitialized fMRIPrep config will have config.workflow.ignore set to None # instead of a list, which will raise an error. asl_surf_wf = init_asl_surf_wf( - mem_gb=mem_gb["resampled"], + mem_gb=mem_gb['resampled'], metadata=metadata, surface_spaces=freesurfer_spaces, medial_surface_nan=config.workflow.medial_surface_nan, @@ -703,23 +702,23 @@ def init_asl_wf( cbf_3d=cbf_3d_derivs, cbf_4d=cbf_4d_derivs, att=att_derivs, - name="asl_surf_wf", + name='asl_surf_wf', ) asl_surf_wf.inputs.inputnode.source_file = asl_file workflow.connect([ (inputnode, asl_surf_wf, [ - ("t1w_preproc", "inputnode.anat"), - ("subjects_dir", "inputnode.subjects_dir"), - ("subject_id", "inputnode.subject_id"), - ("fsnative2t1w_xfm", "inputnode.fsnative2t1w_xfm"), + ('t1w_preproc', 'inputnode.anat'), + ('subjects_dir', 'inputnode.subjects_dir'), + ('subject_id', 'inputnode.subject_id'), + ('fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm'), ]), (asl_fit_wf, asl_surf_wf, [ - ("outputnode.aslref2anat_xfm", "inputnode.aslref2anat_xfm"), + ('outputnode.aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), ]), ]) # fmt:skip for cbf_deriv in cbf_derivs: workflow.connect([ - (cbf_wf, asl_surf_wf, [(f"outputnode.{cbf_deriv}", f"inputnode.{cbf_deriv}")]), + (cbf_wf, asl_surf_wf, [(f'outputnode.{cbf_deriv}', f'inputnode.{cbf_deriv}')]), ]) # fmt:skip if config.workflow.cifti_output: @@ -732,29 +731,29 @@ def init_asl_wf( workflow.connect([ (inputnode, asl_cifti_resample_wf, [ - ("mni6_mask", "inputnode.mni6_mask"), - ("anat2mni6_xfm", "inputnode.anat2mni6_xfm"), - ("fmap_ref", "inputnode.fmap_ref"), - ("fmap_coeff", "inputnode.fmap_coeff"), - ("fmap_id", "inputnode.fmap_id"), - ("white", "inputnode.white"), - ("pial", "inputnode.pial"), - ("midthickness", "inputnode.midthickness"), - ("midthickness_fsLR", "inputnode.midthickness_fsLR"), - ("sphere_reg_fsLR", "inputnode.sphere_reg_fsLR"), - ("cortex_mask", "inputnode.cortex_mask"), - ("anat_ribbon", "inputnode.anat_ribbon"), + ('mni6_mask', 'inputnode.mni6_mask'), + ('anat2mni6_xfm', 'inputnode.anat2mni6_xfm'), + ('fmap_ref', 'inputnode.fmap_ref'), + ('fmap_coeff', 'inputnode.fmap_coeff'), + ('fmap_id', 'inputnode.fmap_id'), + ('white', 'inputnode.white'), + ('pial', 'inputnode.pial'), + ('midthickness', 'inputnode.midthickness'), + ('midthickness_fsLR', 'inputnode.midthickness_fsLR'), + ('sphere_reg_fsLR', 'inputnode.sphere_reg_fsLR'), + ('cortex_mask', 'inputnode.cortex_mask'), + ('anat_ribbon', 'inputnode.anat_ribbon'), ]), (asl_fit_wf, asl_cifti_resample_wf, [ - ("outputnode.coreg_aslref", "inputnode.coreg_aslref"), - ("outputnode.aslref2fmap_xfm", "inputnode.aslref2fmap_xfm"), - ("outputnode.aslref2anat_xfm", "inputnode.aslref2anat_xfm"), + ('outputnode.coreg_aslref', 'inputnode.coreg_aslref'), + ('outputnode.aslref2fmap_xfm', 'inputnode.aslref2fmap_xfm'), + ('outputnode.aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), ]), (asl_native_wf, asl_cifti_resample_wf, [ - ("outputnode.asl_minimal", "inputnode.asl_file"), - ("outputnode.motion_xfm", "inputnode.motion_xfm"), + ('outputnode.asl_minimal', 'inputnode.asl_file'), + ('outputnode.motion_xfm', 'inputnode.motion_xfm'), ]), - (asl_anat_wf, asl_cifti_resample_wf, [("outputnode.bold_file", "inputnode.asl_anat")]), + (asl_anat_wf, asl_cifti_resample_wf, [('outputnode.bold_file', 'inputnode.asl_anat')]), ]) # fmt:skip ds_asl_cifti_wf = init_ds_ciftis_wf( @@ -765,39 +764,39 @@ def init_asl_wf( cbf_4d=cbf_4d_derivs, att=att_derivs, omp_nthreads=omp_nthreads, - name="ds_asl_cifti_wf", + name='ds_asl_cifti_wf', ) 
ds_asl_cifti_wf.inputs.inputnode.source_files = [asl_file] workflow.connect([ (inputnode, ds_asl_cifti_wf, [ - ("t1w_preproc", "inputnode.anat"), - ("mni6_mask", "inputnode.mni6_mask"), - ("anat2mni6_xfm", "inputnode.anat2mni6_xfm"), - ("white", "inputnode.white"), - ("pial", "inputnode.pial"), - ("midthickness", "inputnode.midthickness"), - ("midthickness_fsLR", "inputnode.midthickness_fsLR"), - ("sphere_reg_fsLR", "inputnode.sphere_reg_fsLR"), - ("cortex_mask", "inputnode.cortex_mask"), + ('t1w_preproc', 'inputnode.anat'), + ('mni6_mask', 'inputnode.mni6_mask'), + ('anat2mni6_xfm', 'inputnode.anat2mni6_xfm'), + ('white', 'inputnode.white'), + ('pial', 'inputnode.pial'), + ('midthickness', 'inputnode.midthickness'), + ('midthickness_fsLR', 'inputnode.midthickness_fsLR'), + ('sphere_reg_fsLR', 'inputnode.sphere_reg_fsLR'), + ('cortex_mask', 'inputnode.cortex_mask'), ]), (asl_fit_wf, ds_asl_cifti_wf, [ - ("outputnode.aslref2anat_xfm", "inputnode.aslref2anat_xfm"), + ('outputnode.aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), ]), (asl_cifti_resample_wf, ds_asl_cifti_wf, [ - ("outputnode.asl_cifti", "inputnode.asl_cifti"), - ("outputnode.goodvoxels_mask", "inputnode.goodvoxels_mask"), + ('outputnode.asl_cifti', 'inputnode.asl_cifti'), + ('outputnode.goodvoxels_mask', 'inputnode.goodvoxels_mask'), ]), ]) # fmt:skip for cbf_deriv in cbf_derivs: workflow.connect([ - (cbf_wf, ds_asl_cifti_wf, [(f"outputnode.{cbf_deriv}", f"inputnode.{cbf_deriv}")]), + (cbf_wf, ds_asl_cifti_wf, [(f'outputnode.{cbf_deriv}', f'inputnode.{cbf_deriv}')]), ]) # fmt:skip # Feed CIFTI into CBF-reporting workflow - if "cbf_ts" in cbf_4d_derivs: + if 'cbf_ts' in cbf_4d_derivs: workflow.connect([ (ds_asl_cifti_wf, cbf_reporting_wf, [ - ("outputnode.cbf_ts", "inputnode.cifti_cbf_ts"), + ('outputnode.cbf_ts', 'inputnode.cifti_cbf_ts'), ]), ]) # fmt:skip @@ -806,74 +805,74 @@ def init_asl_wf( # Don't make carpet plots for short or GE data if not use_ge: carpetplot_wf = init_carpetplot_wf( - mem_gb=mem_gb["resampled"], + mem_gb=mem_gb['resampled'], confounds_list=[ - ("global_signal", None, "GS"), - ("csf", None, "GSCSF"), - ("white_matter", None, "GSWM"), - ("std_dvars", None, "DVARS"), - ("framewise_displacement", "mm", "FD"), + ('global_signal', None, 'GS'), + ('csf', None, 'GSCSF'), + ('white_matter', None, 'GSWM'), + ('std_dvars', None, 'DVARS'), + ('framewise_displacement', 'mm', 'FD'), ], metadata=metadata, cifti_output=config.workflow.cifti_output, - suffix="asl", - name="carpetplot_wf", + suffix='asl', + name='carpetplot_wf', ) if config.workflow.cifti_output: workflow.connect([ (asl_cifti_resample_wf, carpetplot_wf, [ - ("outputnode.asl_cifti", "inputnode.cifti_asl"), + ('outputnode.asl_cifti', 'inputnode.cifti_asl'), ]), ]) # fmt:skip def _last(inlist): if not isinstance(inlist, list): - raise ValueError(f"_last: input is not list ({inlist})") + raise ValueError(f'_last: input is not list ({inlist})') return inlist[-1] workflow.connect([ - (inputnode, carpetplot_wf, [("mni2009c2anat_xfm", "inputnode.std2anat_xfm")]), + (inputnode, carpetplot_wf, [('mni2009c2anat_xfm', 'inputnode.std2anat_xfm')]), (asl_fit_wf, carpetplot_wf, [ - ("outputnode.dummy_scans", "inputnode.dummy_scans"), - ("outputnode.asl_mask", "inputnode.asl_mask"), - ("outputnode.aslref2anat_xfm", "inputnode.aslref2anat_xfm"), + ('outputnode.dummy_scans', 'inputnode.dummy_scans'), + ('outputnode.asl_mask', 'inputnode.asl_mask'), + ('outputnode.aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), ]), - (asl_native_wf, carpetplot_wf, [("outputnode.asl_native", 
"inputnode.asl")]), + (asl_native_wf, carpetplot_wf, [('outputnode.asl_native', 'inputnode.asl')]), (asl_confounds_wf, carpetplot_wf, [ - ("outputnode.confounds_file", "inputnode.confounds_file"), - ("outputnode.crown_mask", "inputnode.crown_mask"), - (("outputnode.acompcor_masks", _last), "inputnode.acompcor_mask"), + ('outputnode.confounds_file', 'inputnode.confounds_file'), + ('outputnode.crown_mask', 'inputnode.crown_mask'), + (('outputnode.acompcor_masks', _last), 'inputnode.acompcor_mask'), ]), ]) # fmt:skip # Parcellate CBF maps and write out parcellated TSV files and atlases parcellate_cbf_wf = init_parcellate_cbf_wf( cbf_3d=cbf_3d_derivs, - name="parcellate_cbf_wf", + name='parcellate_cbf_wf', ) workflow.connect([ (inputnode, parcellate_cbf_wf, [ - ("asl_file", "inputnode.source_file"), - ("mni2009c2anat_xfm", "inputnode.MNI152NLin2009cAsym_to_anat_xfm"), + ('asl_file', 'inputnode.source_file'), + ('mni2009c2anat_xfm', 'inputnode.MNI152NLin2009cAsym_to_anat_xfm'), ]), (asl_fit_wf, parcellate_cbf_wf, [ - ("outputnode.asl_mask", "inputnode.asl_mask"), - ("outputnode.aslref2anat_xfm", "inputnode.aslref2anat_xfm"), + ('outputnode.asl_mask', 'inputnode.asl_mask'), + ('outputnode.aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), ]), ]) # fmt:skip for cbf_deriv in cbf_3d_derivs: workflow.connect([ (cbf_wf, parcellate_cbf_wf, [ - (f"outputnode.{cbf_deriv}", f"inputnode.{cbf_deriv}"), + (f'outputnode.{cbf_deriv}', f'inputnode.{cbf_deriv}'), ]), ]) # fmt:skip # Fill in datasinks of reportlets seen so far for node in workflow.list_node_names(): - if node.split(".")[-1].startswith("ds_report"): + if node.split('.')[-1].startswith('ds_report'): workflow.get_node(node).inputs.base_directory = config.execution.aslprep_dir workflow.get_node(node).inputs.source_file = asl_file @@ -885,6 +884,6 @@ def _read_json(in_file): from pathlib import Path if not isinstance(in_file, str): - raise ValueError(f"_read_json: input is not str ({in_file})") + raise ValueError(f'_read_json: input is not str ({in_file})') return loads(Path(in_file).read_text()) diff --git a/aslprep/workflows/asl/cbf.py b/aslprep/workflows/asl/cbf.py index 648ddcaaf..00e87a26d 100644 --- a/aslprep/workflows/asl/cbf.py +++ b/aslprep/workflows/asl/cbf.py @@ -41,7 +41,7 @@ def init_cbf_wf( basil=False, m0_scale=1, smooth_kernel=5, - name="cbf_wf", + name='cbf_wf', ): """Create a workflow for :abbr:`CCBF (compute cbf)`. @@ -113,39 +113,39 @@ def init_cbf_wf( """ - m0type = metadata["M0Type"] + m0type = metadata['M0Type'] is_casl = pcasl_or_pasl(metadata=metadata) is_multi_pld = determine_multi_pld(metadata=metadata) - if (processing_target == "cbf") and not basil: - config.loggers.workflow.info(f"Only CBF volumes are detected in {name_source}.") - elif processing_target == "cbf": + if (processing_target == 'cbf') and not basil: + config.loggers.workflow.info(f'Only CBF volumes are detected in {name_source}.') + elif processing_target == 'cbf': config.loggers.workflow.warning( - f"Only CBF volumes are detected in {name_source}. " - "BASIL will automatically be disabled." + f'Only CBF volumes are detected in {name_source}. ' + 'BASIL will automatically be disabled.' ) basil = False - if m0type in ("Included", "Separate"): + if m0type in ('Included', 'Separate'): m0_str = ( - "Calibration (M0) volumes associated with the ASL scan were smoothed with a " - f"Gaussian kernel (FWHM={smooth_kernel} mm) and the average calibration image was " - f"calculated and scaled by {m0_scale}." 
+ 'Calibration (M0) volumes associated with the ASL scan were smoothed with a ' + f'Gaussian kernel (FWHM={smooth_kernel} mm) and the average calibration image was ' + f'calculated and scaled by {m0_scale}.' ) - elif m0type == "Estimate": + elif m0type == 'Estimate': m0_str = ( f"A single M0 estimate of {metadata['M0Estimate']} was used to produce a calibration " f"'image' and was scaled by {m0_scale}." ) else: m0_str = ( - f"As no calibration images or provided M0 estimate was available for the ASL scan, " - "the control volumes used as a substitute. " - "The control volumes in the ASL scans were smoothed with a " - f"Gaussian kernel (FWHM={smooth_kernel} mm) and the average control image was " - f"calculated and scaled by {m0_scale}." + f'As no calibration images or provided M0 estimate was available for the ASL scan, ' + 'the control volumes used as a substitute. ' + 'The control volumes in the ASL scans were smoothed with a ' + f'Gaussian kernel (FWHM={smooth_kernel} mm) and the average control image was ' + f'calculated and scaled by {m0_scale}.' ) - if processing_target == "cbf": + if processing_target == 'cbf': workflow.__desc__ += """\ *ASLPrep* loaded pre-calculated cerebral blood flow (CBF) data from the ASL file. """ @@ -179,29 +179,29 @@ def init_cbf_wf( """ else: - bcut = metadata.get("BolusCutOffTechnique") + bcut = metadata.get('BolusCutOffTechnique') if is_multi_pld: raise ValueError( - "Multi-delay data are not supported for PASL sequences at the moment." + 'Multi-delay data are not supported for PASL sequences at the moment.' ) # Single-delay PASL data, with different bolus cut-off techniques - if bcut == "QUIPSS": + if bcut == 'QUIPSS': workflow.__desc__ += f"""\ *ASLPrep* calculated cerebral blood flow (CBF) from the single-delay PASL using a single-compartment general kinetic model [@buxton1998general] using the QUIPSS modification, as described in @wong1998quantitative. {m0_str} """ - elif bcut == "QUIPSSII": + elif bcut == 'QUIPSSII': workflow.__desc__ += f"""\ *ASLPrep* calculated cerebral blood flow (CBF) from the single-delay PASL using a single-compartment general kinetic model [@buxton1998general] using the QUIPSS II modification, as described in @alsop_recommended_2015. {m0_str} """ - elif bcut == "Q2TIPS": + elif bcut == 'Q2TIPS': workflow.__desc__ += f"""\ *ASLPrep* calculated cerebral blood flow (CBF) from the single-delay PASL using a single-compartment general kinetic model [@buxton1998general] @@ -210,68 +210,68 @@ def init_cbf_wf( """ else: # No bolus cutoff delay technique - raise ValueError("PASL without a bolus cut-off technique is not supported in ASLPrep.") + raise ValueError('PASL without a bolus cut-off technique is not supported in ASLPrep.') - if "SliceTiming" in metadata: + if 'SliceTiming' in metadata: workflow.__desc__ += ( - "Prior to calculating CBF, post-labeling delay values were shifted on a slice-wise " - "basis based on the slice timing." + 'Prior to calculating CBF, post-labeling delay values were shifted on a slice-wise ' + 'basis based on the slice timing.' 
) inputnode = pe.Node( niu.IdentityInterface( fields=[ - "asl_file", - "aslcontext", - "m0scan", - "m0scan_metadata", - "asl_mask", - "t1w_tpms", - "t1w_mask", - "aslref2anat_xfm", + 'asl_file', + 'aslcontext', + 'm0scan', + 'm0scan_metadata', + 'asl_mask', + 't1w_tpms', + 't1w_mask', + 'aslref2anat_xfm', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "mean_cbf", - "cbf_ts", # Only calculated for single-delay data - "att", # Only calculated for multi-delay data - "plds", + 'mean_cbf', + 'cbf_ts', # Only calculated for single-delay data + 'att', # Only calculated for multi-delay data + 'plds', # SCORE/SCRUB outputs - "cbf_ts_score", - "mean_cbf_score", - "mean_cbf_scrub", - "score_outlier_index", + 'cbf_ts_score', + 'mean_cbf_score', + 'mean_cbf_scrub', + 'score_outlier_index', # BASIL outputs - "mean_cbf_basil", - "mean_cbf_gm_basil", - "mean_cbf_wm_basil", - "att_basil", + 'mean_cbf_basil', + 'mean_cbf_gm_basil', + 'mean_cbf_wm_basil', + 'att_basil', ] ), - name="outputnode", + name='outputnode', ) warp_t1w_mask_to_asl = pe.Node( ApplyTransforms( dimension=3, float=True, - interpolation="NearestNeighbor", + interpolation='NearestNeighbor', invert_transform_flags=[True], input_image_type=3, - args="-v", + args='-v', ), - name="warp_t1w_mask_to_asl", + name='warp_t1w_mask_to_asl', ) workflow.connect([ (inputnode, warp_t1w_mask_to_asl, [ - ("asl_mask", "reference_image"), - ("t1w_mask", "input_image"), - ("aslref2anat_xfm", "transforms"), + ('asl_mask', 'reference_image'), + ('t1w_mask', 'input_image'), + ('aslref2anat_xfm', 'transforms'), ]), ]) # fmt:skip @@ -279,28 +279,28 @@ def init_cbf_wf( RefineMask(), mem_gb=0.2, run_without_submitting=True, - name="reduce_mask", + name='reduce_mask', ) workflow.connect([ - (inputnode, reduce_mask, [("asl_mask", "asl_mask")]), - (warp_t1w_mask_to_asl, reduce_mask, [("output_image", "t1w_mask")]), + (inputnode, reduce_mask, [('asl_mask', 'asl_mask')]), + (warp_t1w_mask_to_asl, reduce_mask, [('output_image', 't1w_mask')]), ]) # fmt:skip # Warp tissue probability maps to ASL space def _pick_gm(files): if not isinstance(files, list): - raise ValueError(f"_pick_gm: input is not list ({files})") + raise ValueError(f'_pick_gm: input is not list ({files})') return files[0] def _pick_wm(files): if not isinstance(files, list): - raise ValueError(f"_pick_wm: input is not list ({files})") + raise ValueError(f'_pick_wm: input is not list ({files})') return files[1] def _pick_csf(files): if not isinstance(files, list): - raise ValueError(f"_pick_csf: input is not list ({files})") + raise ValueError(f'_pick_csf: input is not list ({files})') return files[2] def _getfiledir(file): @@ -310,58 +310,58 @@ def _getfiledir(file): gm_tfm = pe.Node( ApplyTransforms( - interpolation="NearestNeighbor", + interpolation='NearestNeighbor', float=True, invert_transform_flags=[True], - args="-v", + args='-v', ), - name="gm_tfm", + name='gm_tfm', mem_gb=0.1, ) workflow.connect([ (inputnode, gm_tfm, [ - ("asl_mask", "reference_image"), - ("aslref2anat_xfm", "transforms"), - (("t1w_tpms", _pick_gm), "input_image"), + ('asl_mask', 'reference_image'), + ('aslref2anat_xfm', 'transforms'), + (('t1w_tpms', _pick_gm), 'input_image'), ]), ]) # fmt:skip wm_tfm = pe.Node( ApplyTransforms( - interpolation="NearestNeighbor", + interpolation='NearestNeighbor', float=True, invert_transform_flags=[True], - args="-v", + args='-v', ), - name="wm_tfm", + name='wm_tfm', mem_gb=0.1, ) workflow.connect([ (inputnode, wm_tfm, [ - ("asl_mask", 
"reference_image"), - ("aslref2anat_xfm", "transforms"), - (("t1w_tpms", _pick_wm), "input_image"), + ('asl_mask', 'reference_image'), + ('aslref2anat_xfm', 'transforms'), + (('t1w_tpms', _pick_wm), 'input_image'), ]), ]) # fmt:skip csf_tfm = pe.Node( ApplyTransforms( - interpolation="NearestNeighbor", + interpolation='NearestNeighbor', float=True, invert_transform_flags=[True], - args="-v", + args='-v', ), - name="csf_tfm", + name='csf_tfm', mem_gb=0.1, ) workflow.connect([ (inputnode, csf_tfm, [ - ("asl_mask", "reference_image"), - ("aslref2anat_xfm", "transforms"), - (("t1w_tpms", _pick_csf), "input_image"), + ('asl_mask', 'reference_image'), + ('aslref2anat_xfm', 'transforms'), + (('t1w_tpms', _pick_csf), 'input_image'), ]), ]) # fmt:skip @@ -374,65 +374,65 @@ def _getfiledir(file): ), mem_gb=0.2, run_without_submitting=True, - name="extract_deltam", + name='extract_deltam', ) workflow.connect([ (inputnode, extract_deltam, [ - ("asl_file", "asl_file"), - ("aslcontext", "aslcontext"), - ("m0scan_metadata", "m0scan_metadata"), + ('asl_file', 'asl_file'), + ('aslcontext', 'aslcontext'), + ('m0scan_metadata', 'm0scan_metadata'), ]), - (reduce_mask, extract_deltam, [("out_mask", "in_mask")]), + (reduce_mask, extract_deltam, [('out_mask', 'in_mask')]), ]) # fmt:skip - if metadata["M0Type"] == "Separate": - mean_m0 = pe.Node(RobustAverage(), name="mean_m0", mem_gb=1) + if metadata['M0Type'] == 'Separate': + mean_m0 = pe.Node(RobustAverage(), name='mean_m0', mem_gb=1) workflow.connect([ - (inputnode, mean_m0, [("m0scan", "in_file")]), - (mean_m0, extract_deltam, [("out_file", "m0scan")]), + (inputnode, mean_m0, [('m0scan', 'in_file')]), + (mean_m0, extract_deltam, [('out_file', 'm0scan')]), ]) # fmt:skip enhance_and_skullstrip_m0scan_wf = init_enhance_and_skullstrip_bold_wf( pre_mask=False, omp_nthreads=1, - name="enhance_and_skullstrip_m0scan_wf", + name='enhance_and_skullstrip_m0scan_wf', ) workflow.connect([ - (mean_m0, enhance_and_skullstrip_m0scan_wf, [("out_file", "inputnode.in_file")]), - (enhance_and_skullstrip_m0scan_wf, reduce_mask, [("outputnode.mask_file", "m0_mask")]), + (mean_m0, enhance_and_skullstrip_m0scan_wf, [('out_file', 'inputnode.in_file')]), + (enhance_and_skullstrip_m0scan_wf, reduce_mask, [('outputnode.mask_file', 'm0_mask')]), ]) # fmt:skip compute_cbf = pe.Node( ComputeCBF( - cbf_only=processing_target == "cbf", + cbf_only=processing_target == 'cbf', m0_scale=m0_scale, ), mem_gb=0.2, run_without_submitting=True, - name="compute_cbf", + name='compute_cbf', ) workflow.connect([ - (reduce_mask, compute_cbf, [("out_mask", "mask")]), + (reduce_mask, compute_cbf, [('out_mask', 'mask')]), (extract_deltam, compute_cbf, [ - ("out_file", "deltam"), - ("m0_file", "m0_file"), - ("metadata", "metadata"), + ('out_file', 'deltam'), + ('m0_file', 'm0_file'), + ('metadata', 'metadata'), ]), (compute_cbf, outputnode, [ - ("cbf_ts", "cbf_ts"), - ("mean_cbf", "mean_cbf"), - ("att", "att"), - ("plds", "plds"), + ('cbf_ts', 'cbf_ts'), + ('mean_cbf', 'mean_cbf'), + ('att', 'att'), + ('plds', 'plds'), ]), ]) # fmt:skip if scorescrub: score_and_scrub_cbf = pe.Node( - ScoreAndScrubCBF(tpm_threshold=0.7, wavelet_function="huber"), + ScoreAndScrubCBF(tpm_threshold=0.7, wavelet_function='huber'), mem_gb=0.2, - name="score_and_scrub_cbf", + name='score_and_scrub_cbf', run_without_submitting=True, ) @@ -445,16 +445,16 @@ def _getfiledir(file): [@dolui2017structural;@dolui2016scrub]. 
""" workflow.connect([ - (reduce_mask, score_and_scrub_cbf, [("out_mask", "mask")]), - (compute_cbf, score_and_scrub_cbf, [("cbf_ts", "cbf_ts")]), - (gm_tfm, score_and_scrub_cbf, [("output_image", "gm_tpm")]), - (wm_tfm, score_and_scrub_cbf, [("output_image", "wm_tpm")]), - (csf_tfm, score_and_scrub_cbf, [("output_image", "csf_tpm")]), + (reduce_mask, score_and_scrub_cbf, [('out_mask', 'mask')]), + (compute_cbf, score_and_scrub_cbf, [('cbf_ts', 'cbf_ts')]), + (gm_tfm, score_and_scrub_cbf, [('output_image', 'gm_tpm')]), + (wm_tfm, score_and_scrub_cbf, [('output_image', 'wm_tpm')]), + (csf_tfm, score_and_scrub_cbf, [('output_image', 'csf_tpm')]), (score_and_scrub_cbf, outputnode, [ - ("cbf_ts_score", "cbf_ts_score"), - ("score_outlier_index", "score_outlier_index"), - ("mean_cbf_score", "mean_cbf_score"), - ("mean_cbf_scrub", "mean_cbf_scrub"), + ('cbf_ts_score', 'cbf_ts_score'), + ('score_outlier_index', 'score_outlier_index'), + ('mean_cbf_score', 'mean_cbf_score'), + ('mean_cbf_scrub', 'mean_cbf_scrub'), ]), ]) # fmt:skip @@ -470,52 +470,52 @@ def _getfiledir(file): determine_bolus_duration = pe.Node( niu.Function( function=get_bolus_duration, - input_names=["metadata", "is_casl"], - output_names=["bolus"], + input_names=['metadata', 'is_casl'], + output_names=['bolus'], ), - name="determine_bolus_duration", + name='determine_bolus_duration', ) determine_bolus_duration.inputs.is_casl = is_casl - workflow.connect([(extract_deltam, determine_bolus_duration, [("metadata", "metadata")])]) + workflow.connect([(extract_deltam, determine_bolus_duration, [('metadata', 'metadata')])]) # Node to define tis determine_inflow_times = pe.Node( niu.Function( function=get_inflow_times, - input_names=["metadata", "is_casl"], - output_names=["tis"], + input_names=['metadata', 'is_casl'], + output_names=['tis'], ), - name="determine_inflow_times", + name='determine_inflow_times', ) determine_inflow_times.inputs.is_casl = is_casl - workflow.connect([(extract_deltam, determine_inflow_times, [("metadata", "metadata")])]) + workflow.connect([(extract_deltam, determine_inflow_times, [('metadata', 'metadata')])]) # Node to estimate labeling efficiency estimate_alpha = pe.Node( niu.Function( function=estimate_labeling_efficiency, - input_names=["metadata"], - output_names=["labeling_efficiency"], + input_names=['metadata'], + output_names=['labeling_efficiency'], ), - name="estimate_alpha", + name='estimate_alpha', ) - workflow.connect([(extract_deltam, estimate_alpha, [("metadata", "metadata")])]) + workflow.connect([(extract_deltam, estimate_alpha, [('metadata', 'metadata')])]) basil_kwargs = {} - if "SliceTiming" in metadata.keys(): - slicetime_diffs = np.unique(np.diff(metadata["SliceTiming"])) + if 'SliceTiming' in metadata.keys(): + slicetime_diffs = np.unique(np.diff(metadata['SliceTiming'])) # Check if slice times are monotonic monotonic_slicetimes = slicetime_diffs.size == 1 # Check if slice times are ascending ascending_slicetimes = np.all(slicetime_diffs > 0) # Only set slicedt for ascending slice orders. if monotonic_slicetimes and ascending_slicetimes: - basil_kwargs["slice_spacing"] = slicetime_diffs[0] + basil_kwargs['slice_spacing'] = slicetime_diffs[0] else: config.loggers.interface.warning( - "Slice times are not ascending. They will be ignored in the BASIL call." + 'Slice times are not ascending. They will be ignored in the BASIL call.' 
) basilcbf = pe.Node( @@ -525,33 +525,33 @@ def _getfiledir(file): pcasl=is_casl, **basil_kwargs, ), - name="basilcbf", + name='basilcbf', run_without_submitting=True, mem_gb=0.2, ) workflow.connect([ - (reduce_mask, basilcbf, [("out_mask", "mask")]), + (reduce_mask, basilcbf, [('out_mask', 'mask')]), (extract_deltam, basilcbf, [ - (("m0_file", _getfiledir), "out_basename"), - ("out_file", "deltam"), - ("m0_file", "mzero"), + (('m0_file', _getfiledir), 'out_basename'), + ('out_file', 'deltam'), + ('m0_file', 'mzero'), ]), - (determine_bolus_duration, basilcbf, [("bolus", "bolus")]), - (determine_inflow_times, basilcbf, [("tis", "tis")]), - (estimate_alpha, basilcbf, [("labeling_efficiency", "alpha")]), - (gm_tfm, basilcbf, [("output_image", "gm_tpm")]), - (wm_tfm, basilcbf, [("output_image", "wm_tpm")]), + (determine_bolus_duration, basilcbf, [('bolus', 'bolus')]), + (determine_inflow_times, basilcbf, [('tis', 'tis')]), + (estimate_alpha, basilcbf, [('labeling_efficiency', 'alpha')]), + (gm_tfm, basilcbf, [('output_image', 'gm_tpm')]), + (wm_tfm, basilcbf, [('output_image', 'wm_tpm')]), (basilcbf, outputnode, [ - ("mean_cbf_basil", "mean_cbf_basil"), - ("mean_cbf_gm_basil", "mean_cbf_gm_basil"), - ("mean_cbf_wm_basil", "mean_cbf_wm_basil"), - ("att_basil", "att_basil"), + ('mean_cbf_basil', 'mean_cbf_basil'), + ('mean_cbf_gm_basil', 'mean_cbf_gm_basil'), + ('mean_cbf_wm_basil', 'mean_cbf_wm_basil'), + ('att_basil', 'att_basil'), ]), ]) # fmt:skip - if metadata["M0Type"] != "Estimate": - workflow.connect([(extract_deltam, basilcbf, [("m0tr", "m0tr")])]) + if metadata['M0Type'] != 'Estimate': + workflow.connect([(extract_deltam, basilcbf, [('m0tr', 'm0tr')])]) return workflow @@ -561,7 +561,7 @@ def init_parcellate_cbf_wf( min_coverage=0.5, mem_gb=0.1, omp_nthreads=1, - name="parcellate_cbf_wf", + name='parcellate_cbf_wf', ): """Parcellate CBF results using a set of atlases. @@ -622,21 +622,21 @@ def init_parcellate_cbf_wf( Only defined if ``basil`` is True. 
""" CBF_ENTITIES = { - "mean_cbf": {}, - "mean_cbf_score": { - "desc": "score", + 'mean_cbf': {}, + 'mean_cbf_score': { + 'desc': 'score', }, - "mean_cbf_scrub": { - "desc": "scrub", + 'mean_cbf_scrub': { + 'desc': 'scrub', }, - "mean_cbf_basil": { - "desc": "basil", + 'mean_cbf_basil': { + 'desc': 'basil', }, - "mean_cbf_gm_basil": { - "desc": "basilGM", + 'mean_cbf_gm_basil': { + 'desc': 'basilGM', }, - "mean_cbf_wm_basil": { - "desc": "basilWM", + 'mean_cbf_wm_basil': { + 'desc': 'basilWM', }, } workflow = Workflow(name=name) @@ -655,53 +655,53 @@ def init_parcellate_cbf_wf( """ input_fields = [ - "source_file", - "asl_mask", - "aslref2anat_xfm", - "MNI152NLin2009cAsym_to_anat_xfm", + 'source_file', + 'asl_mask', + 'aslref2anat_xfm', + 'MNI152NLin2009cAsym_to_anat_xfm', ] input_fields += cbf_3d inputnode = pe.Node( niu.IdentityInterface(fields=input_fields), - name="inputnode", + name='inputnode', ) - output_fields = ["atlas_names"] + [f"{field}_parcellated" for field in cbf_3d] + output_fields = ['atlas_names'] + [f'{field}_parcellated' for field in cbf_3d] outputnode = pe.Node( niu.IdentityInterface(fields=output_fields), - name="outputnode", + name='outputnode', ) atlas_name_grabber = pe.Node( niu.Function( - input_names=["subset"], - output_names=["atlas_names"], + input_names=['subset'], + output_names=['atlas_names'], function=get_atlas_names, ), - name="atlas_name_grabber", + name='atlas_name_grabber', ) - atlas_name_grabber.inputs.subset = "all" - workflow.connect([(atlas_name_grabber, outputnode, [("atlas_names", "atlas_names")])]) + atlas_name_grabber.inputs.subset = 'all' + workflow.connect([(atlas_name_grabber, outputnode, [('atlas_names', 'atlas_names')])]) # get atlases via aslprep.data.load atlas_file_grabber = pe.MapNode( niu.Function( - input_names=["atlas_name"], - output_names=["atlas_file", "atlas_labels_file", "atlas_metadata_file"], + input_names=['atlas_name'], + output_names=['atlas_file', 'atlas_labels_file', 'atlas_metadata_file'], function=get_atlas_nifti, ), - name="atlas_file_grabber", - iterfield=["atlas_name"], + name='atlas_file_grabber', + iterfield=['atlas_name'], ) - workflow.connect([(atlas_name_grabber, atlas_file_grabber, [("atlas_names", "atlas_name")])]) + workflow.connect([(atlas_name_grabber, atlas_file_grabber, [('atlas_names', 'atlas_name')])]) # Atlases are in MNI152NLin6Asym MNI152NLin6Asym_to_MNI152NLin2009cAsym = str( get_template( - template="MNI152NLin2009cAsym", - mode="image", - suffix="xfm", - extension=".h5", - **{"from": "MNI152NLin6Asym"}, + template='MNI152NLin2009cAsym', + mode='image', + suffix='xfm', + extension='.h5', + **{'from': 'MNI152NLin6Asym'}, ), ) @@ -709,72 +709,72 @@ def init_parcellate_cbf_wf( # One of the output spaces selected by the user *may* be MNI152NLin6Asym, # but MNI152NLin2009cAsym is always used, so it's safer to go: # MNI152NLin6Asym --> MNI152NLin2009cAsym --> anat --> asl - merge_xforms = pe.Node(niu.Merge(3), name="merge_xforms") + merge_xforms = pe.Node(niu.Merge(3), name='merge_xforms') merge_xforms.inputs.in1 = MNI152NLin6Asym_to_MNI152NLin2009cAsym workflow.connect([ (inputnode, merge_xforms, [ - ("MNI152NLin2009cAsym_to_anat_xfm", "in2"), - ("aslref2anat_xfm", "in3"), + ('MNI152NLin2009cAsym_to_anat_xfm', 'in2'), + ('aslref2anat_xfm', 'in3'), ]), ]) # fmt:skip # Using the generated transforms, apply them to get everything in the correct MNI form warp_atlases_to_asl_space = pe.MapNode( ApplyTransforms( - interpolation="GenericLabel", + interpolation='GenericLabel', input_image_type=3, dimension=3, 
invert_transform_flags=[False, False, True], - args="-v", + args='-v', ), - name="warp_atlases_to_asl_space", - iterfield=["input_image"], + name='warp_atlases_to_asl_space', + iterfield=['input_image'], mem_gb=mem_gb, n_procs=omp_nthreads, ) workflow.connect([ - (inputnode, warp_atlases_to_asl_space, [("asl_mask", "reference_image")]), - (atlas_file_grabber, warp_atlases_to_asl_space, [("atlas_file", "input_image")]), - (merge_xforms, warp_atlases_to_asl_space, [("out", "transforms")]), + (inputnode, warp_atlases_to_asl_space, [('asl_mask', 'reference_image')]), + (atlas_file_grabber, warp_atlases_to_asl_space, [('atlas_file', 'input_image')]), + (merge_xforms, warp_atlases_to_asl_space, [('out', 'transforms')]), ]) # fmt:skip for cbf_type in cbf_3d: parcellate_cbf = pe.MapNode( ParcellateCBF(min_coverage=min_coverage), - name=f"parcellate_{cbf_type}", - iterfield=["atlas", "atlas_labels"], + name=f'parcellate_{cbf_type}', + iterfield=['atlas', 'atlas_labels'], mem_gb=mem_gb, ) workflow.connect([ (inputnode, parcellate_cbf, [ - (cbf_type, "in_file"), - ("asl_mask", "mask"), + (cbf_type, 'in_file'), + ('asl_mask', 'mask'), ]), - (atlas_file_grabber, parcellate_cbf, [("atlas_labels_file", "atlas_labels")]), - (warp_atlases_to_asl_space, parcellate_cbf, [("output_image", "atlas")]), + (atlas_file_grabber, parcellate_cbf, [('atlas_labels_file', 'atlas_labels')]), + (warp_atlases_to_asl_space, parcellate_cbf, [('output_image', 'atlas')]), ]) # fmt:skip ds_cbf = pe.MapNode( DerivativesDataSink( base_directory=config.execution.aslprep_dir, check_hdr=False, - suffix="cbf", + suffix='cbf', **CBF_ENTITIES[cbf_type], ), - name=f"ds_{cbf_type}", - iterfield=["atlas", "in_file"], + name=f'ds_{cbf_type}', + iterfield=['atlas', 'in_file'], run_without_submitting=True, ) workflow.connect([ - (inputnode, ds_cbf, [("source_file", "source_file")]), - (atlas_name_grabber, ds_cbf, [("atlas_names", "atlas")]), - (parcellate_cbf, ds_cbf, [("timeseries", "in_file")]), + (inputnode, ds_cbf, [('source_file', 'source_file')]), + (atlas_name_grabber, ds_cbf, [('atlas_names', 'atlas')]), + (parcellate_cbf, ds_cbf, [('timeseries', 'in_file')]), ]) # fmt:skip - if cbf_type in ("mean_cbf", "mean_cbf_basil"): + if cbf_type in ('mean_cbf', 'mean_cbf_basil'): # I think it is easier to only retain the coverage file for the regular CBF estimates. # SCORE/SCRUB CBF should have the same coverage as the regular CBF. 
# BASIL might have different coverage because it drops any voxels with negative CBF, @@ -783,31 +783,31 @@ def init_parcellate_cbf_wf( DerivativesDataSink( base_directory=config.execution.aslprep_dir, check_hdr=False, - suffix="coverage", + suffix='coverage', **CBF_ENTITIES[cbf_type], ), - name=f"ds_coverage_{cbf_type}", - iterfield=["atlas", "in_file"], + name=f'ds_coverage_{cbf_type}', + iterfield=['atlas', 'in_file'], run_without_submitting=True, ) workflow.connect([ - (inputnode, ds_coverage, [("source_file", "source_file")]), - (atlas_name_grabber, ds_coverage, [("atlas_names", "atlas")]), - (parcellate_cbf, ds_coverage, [("coverage", "in_file")]), + (inputnode, ds_coverage, [('source_file', 'source_file')]), + (atlas_name_grabber, ds_coverage, [('atlas_names', 'atlas')]), + (parcellate_cbf, ds_coverage, [('coverage', 'in_file')]), ]) # fmt:skip # Get entities from atlas for datasinks get_atlas_entities = pe.MapNode( niu.Function( - input_names=["filename"], - output_names=["tpl", "atlas", "res", "suffix", "extension"], + input_names=['filename'], + output_names=['tpl', 'atlas', 'res', 'suffix', 'extension'], function=find_atlas_entities, ), - name="get_atlas_entities", - iterfield=["filename"], + name='get_atlas_entities', + iterfield=['filename'], ) - workflow.connect([(atlas_file_grabber, get_atlas_entities, [("atlas_file", "filename")])]) + workflow.connect([(atlas_file_grabber, get_atlas_entities, [('atlas_file', 'filename')])]) # Write out standard-space atlas file. # This won't be in the same space that the data were parcellated in, @@ -816,23 +816,23 @@ def init_parcellate_cbf_wf( DerivativesDataSink( base_directory=config.execution.aslprep_dir, check_hdr=False, - dismiss_entities=["datatype", "subject", "session", "task", "run", "desc"], - allowed_entities=["space", "res", "den", "atlas", "desc", "cohort"], + dismiss_entities=['datatype', 'subject', 'session', 'task', 'run', 'desc'], + allowed_entities=['space', 'res', 'den', 'atlas', 'desc', 'cohort'], ), - name="ds_atlas", - iterfield=["space", "atlas", "resolution", "suffix", "extension", "in_file"], + name='ds_atlas', + iterfield=['space', 'atlas', 'resolution', 'suffix', 'extension', 'in_file'], run_without_submitting=True, ) workflow.connect([ - (inputnode, ds_atlas, [("source_file", "source_file")]), - (atlas_file_grabber, ds_atlas, [("atlas_file", "in_file")]), + (inputnode, ds_atlas, [('source_file', 'source_file')]), + (atlas_file_grabber, ds_atlas, [('atlas_file', 'in_file')]), (get_atlas_entities, ds_atlas, [ - ("tpl", "space"), - ("atlas", "atlas"), - ("res", "resolution"), - ("suffix", "suffix"), - ("extension", "extension"), + ('tpl', 'space'), + ('atlas', 'atlas'), + ('res', 'resolution'), + ('suffix', 'suffix'), + ('extension', 'extension'), ]), ]) # fmt:skip @@ -841,31 +841,31 @@ def init_parcellate_cbf_wf( base_directory=config.execution.aslprep_dir, check_hdr=False, dismiss_entities=[ - "datatype", - "subject", - "session", - "task", - "run", - "desc", - "space", - "res", - "den", - "cohort", + 'datatype', + 'subject', + 'session', + 'task', + 'run', + 'desc', + 'space', + 'res', + 'den', + 'cohort', ], - allowed_entities=["atlas"], - extension=".tsv", + allowed_entities=['atlas'], + extension='.tsv', ), - name="ds_atlas_labels_file", - iterfield=["atlas", "suffix", "in_file"], + name='ds_atlas_labels_file', + iterfield=['atlas', 'suffix', 'in_file'], run_without_submitting=True, ) workflow.connect([ - (inputnode, ds_atlas_labels_file, [("source_file", "source_file")]), - (atlas_file_grabber, 
ds_atlas_labels_file, [("atlas_labels_file", "in_file")]), + (inputnode, ds_atlas_labels_file, [('source_file', 'source_file')]), + (atlas_file_grabber, ds_atlas_labels_file, [('atlas_labels_file', 'in_file')]), (get_atlas_entities, ds_atlas_labels_file, [ - ("atlas", "atlas"), - ("suffix", "suffix"), + ('atlas', 'atlas'), + ('suffix', 'suffix'), ]), ]) # fmt:skip @@ -874,31 +874,31 @@ def init_parcellate_cbf_wf( base_directory=config.execution.aslprep_dir, check_hdr=False, dismiss_entities=[ - "datatype", - "subject", - "session", - "task", - "run", - "desc", - "space", - "res", - "den", - "cohort", + 'datatype', + 'subject', + 'session', + 'task', + 'run', + 'desc', + 'space', + 'res', + 'den', + 'cohort', ], - allowed_entities=["atlas"], - extension=".json", + allowed_entities=['atlas'], + extension='.json', ), - name="ds_atlas_metadata", - iterfield=["atlas", "suffix", "in_file"], + name='ds_atlas_metadata', + iterfield=['atlas', 'suffix', 'in_file'], run_without_submitting=True, ) workflow.connect([ - (inputnode, ds_atlas_metadata, [("source_file", "source_file")]), - (atlas_file_grabber, ds_atlas_metadata, [("atlas_metadata_file", "in_file")]), + (inputnode, ds_atlas_metadata, [('source_file', 'source_file')]), + (atlas_file_grabber, ds_atlas_metadata, [('atlas_metadata_file', 'in_file')]), (get_atlas_entities, ds_atlas_metadata, [ - ("atlas", "atlas"), - ("suffix", "suffix"), + ('atlas', 'atlas'), + ('suffix', 'suffix'), ]), ]) # fmt:skip diff --git a/aslprep/workflows/asl/confounds.py b/aslprep/workflows/asl/confounds.py index 45f907922..9f3d0a788 100644 --- a/aslprep/workflows/asl/confounds.py +++ b/aslprep/workflows/asl/confounds.py @@ -20,7 +20,7 @@ def init_asl_confounds_wf( n_volumes: int, mem_gb: float, freesurfer: bool = False, - name: str = "asl_confounds_wf", + name: str = 'asl_confounds_wf', ): """Build a workflow to generate and write out confounding signals. 
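The parcellation nodes just above (atlas_file_grabber, parcellate_cbf, and the ds_* datasinks) rely on pe.MapNode, which runs its wrapped interface once per element of the inputs named in iterfield. A small self-contained sketch of that idiom; the function and the atlas names here are invented for the example, not ASLPrep code:

from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe

def _length(in_value):
    """Toy function run once per list element."""
    return len(in_value)

wf = pe.Workflow(name='mapnode_example_wf')

# MapNode fans the interface out over each element of the iterfield input,
# the same pattern used for the per-atlas grabber, parcellation, and datasink nodes.
measure = pe.MapNode(
    niu.Function(input_names=['in_value'], output_names=['out_value'], function=_length),
    iterfield=['in_value'],
    name='measure',
)
measure.inputs.in_value = ['Schaefer100', 'Schaefer200']  # hypothetical atlas names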
@@ -111,190 +111,190 @@ def init_asl_confounds_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "asl", - "asl_mask", - "movpar_file", - "rmsd_file", - "skip_vols", - "t1w_mask", - "t1w_tpms", - "aslref2anat_xfm", + 'asl', + 'asl_mask', + 'movpar_file', + 'rmsd_file', + 'skip_vols', + 't1w_mask', + 't1w_tpms', + 'aslref2anat_xfm', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "confounds_file", - "confounds_metadata", - "acompcor_masks", - "crown_mask", + 'confounds_file', + 'confounds_metadata', + 'acompcor_masks', + 'crown_mask', ], ), - name="outputnode", + name='outputnode', ) add_motion_headers = pe.Node( - AddTSVHeader(columns=["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"]), - name="add_motion_headers", + AddTSVHeader(columns=['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z']), + name='add_motion_headers', mem_gb=0.01, run_without_submitting=True, ) - workflow.connect([(inputnode, add_motion_headers, [("movpar_file", "in_file")])]) + workflow.connect([(inputnode, add_motion_headers, [('movpar_file', 'in_file')])]) if n_volumes > 2: # set to 2 bc relative arrays will be 1D instead of 2D for 1-volume data # DVARS dvars = pe.Node( nac.ComputeDVARS(save_nstd=True, save_std=True, remove_zerovariance=True), - name="dvars", + name='dvars', mem_gb=mem_gb, ) workflow.connect([ (inputnode, dvars, [ - ("asl", "in_file"), - ("asl_mask", "in_mask"), + ('asl', 'in_file'), + ('asl_mask', 'in_mask'), ]), ]) # fmt:skip # Frame displacement fdisp = pe.Node( - nac.FramewiseDisplacement(parameter_source="SPM"), - name="fdisp", + nac.FramewiseDisplacement(parameter_source='SPM'), + name='fdisp', mem_gb=mem_gb, ) - workflow.connect([(inputnode, fdisp, [("movpar_file", "in_file")])]) + workflow.connect([(inputnode, fdisp, [('movpar_file', 'in_file')])]) # Arrange confounds add_dvars_header = pe.Node( - AddTSVHeader(columns=["dvars"]), - name="add_dvars_header", + AddTSVHeader(columns=['dvars']), + name='add_dvars_header', mem_gb=0.01, run_without_submitting=True, ) add_std_dvars_header = pe.Node( - AddTSVHeader(columns=["std_dvars"]), - name="add_std_dvars_header", + AddTSVHeader(columns=['std_dvars']), + name='add_std_dvars_header', mem_gb=0.01, run_without_submitting=True, ) add_rmsd_header = pe.Node( - AddTSVHeader(columns=["rmsd"]), - name="add_rmsd_header", + AddTSVHeader(columns=['rmsd']), + name='add_rmsd_header', mem_gb=0.01, run_without_submitting=True, ) workflow.connect([ # Collate computed confounds together - (inputnode, add_rmsd_header, [("rmsd_file", "in_file")]), - (dvars, add_dvars_header, [("out_nstd", "in_file")]), - (dvars, add_std_dvars_header, [("out_std", "in_file")]), + (inputnode, add_rmsd_header, [('rmsd_file', 'in_file')]), + (dvars, add_dvars_header, [('out_nstd', 'in_file')]), + (dvars, add_std_dvars_header, [('out_std', 'in_file')]), ]) # fmt:skip # Project T1w mask into BOLD space and merge with BOLD brainmask t1w_mask_tfm = pe.Node( - ApplyTransforms(interpolation="GenericLabel", invert_transform_flags=[True], args="-v"), - name="t1w_mask_tfm", + ApplyTransforms(interpolation='GenericLabel', invert_transform_flags=[True], args='-v'), + name='t1w_mask_tfm', ) - union_mask = pe.Node(niu.Function(function=_binary_union), name="union_mask") + union_mask = pe.Node(niu.Function(function=_binary_union), name='union_mask') # Create the crown mask - dilated_mask = pe.Node(BinaryDilation(), name="dilated_mask") - subtract_mask = pe.Node(BinarySubtraction(), name="subtract_mask") + dilated_mask = 
pe.Node(BinaryDilation(), name='dilated_mask') + subtract_mask = pe.Node(BinarySubtraction(), name='subtract_mask') workflow.connect([ # Brain mask (inputnode, t1w_mask_tfm, [ - ("t1w_mask", "input_image"), - ("asl_mask", "reference_image"), - ("aslref2anat_xfm", "transforms"), + ('t1w_mask', 'input_image'), + ('asl_mask', 'reference_image'), + ('aslref2anat_xfm', 'transforms'), ]), - (inputnode, union_mask, [("asl_mask", "mask1")]), - (t1w_mask_tfm, union_mask, [("output_image", "mask2")]), - (union_mask, dilated_mask, [("out", "in_mask")]), - (union_mask, subtract_mask, [("out", "in_subtract")]), - (dilated_mask, subtract_mask, [("out_mask", "in_base")]), - (subtract_mask, outputnode, [("out_mask", "crown_mask")]), + (inputnode, union_mask, [('asl_mask', 'mask1')]), + (t1w_mask_tfm, union_mask, [('output_image', 'mask2')]), + (union_mask, dilated_mask, [('out', 'in_mask')]), + (union_mask, subtract_mask, [('out', 'in_subtract')]), + (dilated_mask, subtract_mask, [('out_mask', 'in_base')]), + (subtract_mask, outputnode, [('out_mask', 'crown_mask')]), ]) # fmt:skip # Generate aCompCor probseg maps - acc_masks = pe.Node(aCompCorMasks(is_aseg=freesurfer), name="acc_masks") + acc_masks = pe.Node(aCompCorMasks(is_aseg=freesurfer), name='acc_masks') workflow.connect([ (inputnode, acc_masks, [ - ("t1w_tpms", "in_vfs"), - (("asl", _get_zooms), "bold_zooms"), + ('t1w_tpms', 'in_vfs'), + (('asl', _get_zooms), 'bold_zooms'), ]), ]) # fmt:skip # Resample probseg maps in BOLD space via T1w-to-BOLD transform acc_msk_tfm = pe.MapNode( - ApplyTransforms(interpolation="Gaussian", invert_transform_flags=[True], args="-v"), - iterfield=["input_image"], - name="acc_msk_tfm", + ApplyTransforms(interpolation='Gaussian', invert_transform_flags=[True], args='-v'), + iterfield=['input_image'], + name='acc_msk_tfm', mem_gb=0.1, ) workflow.connect([ (inputnode, acc_msk_tfm, [ - ("aslref2anat_xfm", "transforms"), - ("asl_mask", "reference_image"), + ('aslref2anat_xfm', 'transforms'), + ('asl_mask', 'reference_image'), ]), - (acc_masks, acc_msk_tfm, [("out_masks", "input_image")]), + (acc_masks, acc_msk_tfm, [('out_masks', 'input_image')]), ]) # fmt:skip - acc_msk_brain = pe.MapNode(ApplyMask(), name="acc_msk_brain", iterfield=["in_file"]) + acc_msk_brain = pe.MapNode(ApplyMask(), name='acc_msk_brain', iterfield=['in_file']) workflow.connect([ - (inputnode, acc_msk_brain, [("asl_mask", "in_mask")]), - (acc_msk_tfm, acc_msk_brain, [("output_image", "in_file")]), + (inputnode, acc_msk_brain, [('asl_mask', 'in_mask')]), + (acc_msk_tfm, acc_msk_brain, [('output_image', 'in_file')]), ]) # fmt:skip - acc_msk_bin = pe.MapNode(Binarize(thresh_low=0.99), name="acc_msk_bin", iterfield=["in_file"]) + acc_msk_bin = pe.MapNode(Binarize(thresh_low=0.99), name='acc_msk_bin', iterfield=['in_file']) workflow.connect([ - (acc_msk_brain, acc_msk_bin, [("out_file", "in_file")]), - (acc_msk_bin, outputnode, [("out_file", "acompcor_masks")]), + (acc_msk_brain, acc_msk_bin, [('out_file', 'in_file')]), + (acc_msk_bin, outputnode, [('out_file', 'acompcor_masks')]), ]) # fmt:skip # Global and segment regressors signals_class_labels = [ - "global_signal", - "csf", - "white_matter", - "csf_wm", + 'global_signal', + 'csf', + 'white_matter', + 'csf_wm', ] merge_rois = pe.Node( niu.Merge(2, ravel_inputs=True), - name="merge_rois", + name='merge_rois', run_without_submitting=True, ) signals = pe.Node( SignalExtraction(class_labels=signals_class_labels), - name="signals", + name='signals', mem_gb=mem_gb, ) workflow.connect([ - (inputnode, 
merge_rois, [("asl_mask", "in1")]), - (acc_msk_bin, merge_rois, [("out_file", "in2")]), - (inputnode, signals, [("asl", "in_file")]), - (merge_rois, signals, [("out", "label_files")]), + (inputnode, merge_rois, [('asl_mask', 'in1')]), + (acc_msk_bin, merge_rois, [('out_file', 'in2')]), + (inputnode, signals, [('asl', 'in_file')]), + (merge_rois, signals, [('out', 'label_files')]), ]) # fmt:skip concat = pe.Node( GatherConfounds(), - name="concat", + name='concat', mem_gb=0.01, run_without_submitting=True, ) workflow.connect([ - (add_motion_headers, concat, [("out_file", "motion")]), - (signals, concat, [("out_file", "signals")]), - (concat, outputnode, [("confounds_file", "confounds_file")]), + (add_motion_headers, concat, [('out_file', 'motion')]), + (signals, concat, [('out_file', 'signals')]), + (concat, outputnode, [('confounds_file', 'confounds_file')]), ]) # fmt:skip if n_volumes > 2: workflow.connect([ - (fdisp, concat, [("out_file", "fd")]), - (add_rmsd_header, concat, [("out_file", "rmsd")]), - (add_dvars_header, concat, [("out_file", "dvars")]), - (add_std_dvars_header, concat, [("out_file", "std_dvars")]), + (fdisp, concat, [('out_file', 'fd')]), + (add_rmsd_header, concat, [('out_file', 'rmsd')]), + (add_dvars_header, concat, [('out_file', 'dvars')]), + (add_std_dvars_header, concat, [('out_file', 'std_dvars')]), ]) # fmt:skip return workflow @@ -305,8 +305,8 @@ def init_carpetplot_wf( confounds_list: list, metadata: dict, cifti_output: bool, - suffix: str = "asl", - name: str = "asl_carpet_wf", + suffix: str = 'asl', + name: str = 'asl_carpet_wf', ): """Build a workflow to generate *carpet* plots. @@ -361,48 +361,48 @@ def init_carpetplot_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "asl", - "asl_mask", - "confounds_file", - "aslref2anat_xfm", - "std2anat_xfm", - "cifti_asl", - "crown_mask", - "acompcor_mask", - "dummy_scans", + 'asl', + 'asl_mask', + 'confounds_file', + 'aslref2anat_xfm', + 'std2anat_xfm', + 'cifti_asl', + 'crown_mask', + 'acompcor_mask', + 'dummy_scans', ] ), - name="inputnode", + name='inputnode', ) - outputnode = pe.Node(niu.IdentityInterface(fields=["out_carpetplot"]), name="outputnode") + outputnode = pe.Node(niu.IdentityInterface(fields=['out_carpetplot']), name='outputnode') # Carpetplot and confounds plot conf_plot = pe.Node( ASLCarpetPlot( - tr=metadata["RepetitionTime"], + tr=metadata['RepetitionTime'], confounds_list=confounds_list, ), - name="conf_plot", + name='conf_plot', mem_gb=mem_gb, ) ds_report_asl_conf = pe.Node( DerivativesDataSink( - desc="carpetplot", - datatype="figures", + desc='carpetplot', + datatype='figures', suffix=suffix, - extension="svg", - dismiss_entities=("echo",), + extension='svg', + dismiss_entities=('echo',), ), - name="ds_report_asl_conf", + name='ds_report_asl_conf', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - parcels = pe.Node(niu.Function(function=_carpet_parcellation), name="parcels") + parcels = pe.Node(niu.Function(function=_carpet_parcellation), name='parcels') parcels.inputs.nifti = not cifti_output # List transforms - mrg_xfms = pe.Node(niu.Merge(2), name="mrg_xfms") + mrg_xfms = pe.Node(niu.Merge(2), name='mrg_xfms') # Warp segmentation into EPI space resample_parc = pe.Node( @@ -410,44 +410,44 @@ def init_carpetplot_wf( dimension=3, input_image=str( get_template( - "MNI152NLin2009cAsym", + 'MNI152NLin2009cAsym', resolution=1, - desc="carpet", - suffix="dseg", - extension=[".nii", ".nii.gz"], + desc='carpet', + suffix='dseg', + extension=['.nii', '.nii.gz'], ), ), - 
interpolation="GenericLabel", + interpolation='GenericLabel', invert_transform_flags=[True, False], - args="-u int -v", + args='-u int -v', ), - name="resample_parc", + name='resample_parc', ) workflow = Workflow(name=name) if cifti_output: - workflow.connect(inputnode, "cifti_asl", conf_plot, "in_cifti") + workflow.connect(inputnode, 'cifti_asl', conf_plot, 'in_cifti') workflow.connect([ (inputnode, mrg_xfms, [ - ("aslref2anat_xfm", "in1"), - ("std2anat_xfm", "in2"), + ('aslref2anat_xfm', 'in1'), + ('std2anat_xfm', 'in2'), ]), - (inputnode, resample_parc, [("asl_mask", "reference_image")]), + (inputnode, resample_parc, [('asl_mask', 'reference_image')]), (inputnode, parcels, [ - ("crown_mask", "crown_mask"), - ("acompcor_mask", "acompcor_mask"), + ('crown_mask', 'crown_mask'), + ('acompcor_mask', 'acompcor_mask'), ]), (inputnode, conf_plot, [ - ("asl", "in_nifti"), - ("confounds_file", "confounds_file"), - ("dummy_scans", "drop_trs"), + ('asl', 'in_nifti'), + ('confounds_file', 'confounds_file'), + ('dummy_scans', 'drop_trs'), ]), - (mrg_xfms, resample_parc, [("out", "transforms")]), - (resample_parc, parcels, [("output_image", "segmentation")]), - (parcels, conf_plot, [("out", "in_segm")]), - (conf_plot, ds_report_asl_conf, [("out_file", "in_file")]), - (conf_plot, outputnode, [("out_file", "out_carpetplot")]), + (mrg_xfms, resample_parc, [('out', 'transforms')]), + (resample_parc, parcels, [('output_image', 'segmentation')]), + (parcels, conf_plot, [('out', 'in_segm')]), + (conf_plot, ds_report_asl_conf, [('out_file', 'in_file')]), + (conf_plot, outputnode, [('out_file', 'out_carpetplot')]), ]) # fmt:skip return workflow @@ -455,7 +455,7 @@ def init_carpetplot_wf( def init_cbf_confounds_wf( scorescrub=False, basil=False, - name="cbf_confounds_wf", + name='cbf_confounds_wf', ): """Create a workflow for :abbr:`dolui2017automated (compute cbf)`. 
@@ -510,29 +510,29 @@ def init_cbf_confounds_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "name_source", - "asl_mask", - "t1w_mask", - "t1w_tpms", - "aslref2anat_xfm", - "anat2mni2009c_xfm", + 'name_source', + 'asl_mask', + 't1w_mask', + 't1w_tpms', + 'aslref2anat_xfm', + 'anat2mni2009c_xfm', # CBF inputs - "mean_cbf", + 'mean_cbf', # SCORE/SCRUB inputs - "mean_cbf_score", - "mean_cbf_scrub", + 'mean_cbf_score', + 'mean_cbf_scrub', # BASIL inputs - "mean_cbf_basil", - "mean_cbf_gm_basil", - "mean_cbf_wm_basil", + 'mean_cbf_basil', + 'mean_cbf_gm_basil', + 'mean_cbf_wm_basil', # non-GE inputs - "confounds_file", - "rmsd_file", + 'confounds_file', + 'rmsd_file', ], ), - name="inputnode", + name='inputnode', ) - outputnode = pe.Node(niu.IdentityInterface(fields=["qc_file"]), name="outputnode") + outputnode = pe.Node(niu.IdentityInterface(fields=['qc_file']), name='outputnode') def _pick_gm(files): return files[0] @@ -545,107 +545,107 @@ def _pick_csf(files): gm_tfm = pe.Node( ApplyTransforms( - interpolation="NearestNeighbor", + interpolation='NearestNeighbor', float=True, invert_transform_flags=[True], - args="-v", + args='-v', ), - name="gm_tfm", + name='gm_tfm', mem_gb=0.1, ) workflow.connect([ (inputnode, gm_tfm, [ - ("asl_mask", "reference_image"), - ("aslref2anat_xfm", "transforms"), - (("t1w_tpms", _pick_gm), "input_image"), + ('asl_mask', 'reference_image'), + ('aslref2anat_xfm', 'transforms'), + (('t1w_tpms', _pick_gm), 'input_image'), ]), ]) # fmt:skip wm_tfm = pe.Node( ApplyTransforms( - interpolation="NearestNeighbor", + interpolation='NearestNeighbor', float=True, invert_transform_flags=[True], - args="-v", + args='-v', ), - name="wm_tfm", + name='wm_tfm', mem_gb=0.1, ) workflow.connect([ (inputnode, wm_tfm, [ - ("asl_mask", "reference_image"), - ("aslref2anat_xfm", "transforms"), - (("t1w_tpms", _pick_wm), "input_image"), + ('asl_mask', 'reference_image'), + ('aslref2anat_xfm', 'transforms'), + (('t1w_tpms', _pick_wm), 'input_image'), ]), ]) # fmt:skip csf_tfm = pe.Node( ApplyTransforms( - interpolation="NearestNeighbor", + interpolation='NearestNeighbor', float=True, invert_transform_flags=[True], - args="-v", + args='-v', ), - name="csf_tfm", + name='csf_tfm', mem_gb=0.1, ) workflow.connect([ (inputnode, csf_tfm, [ - ("asl_mask", "reference_image"), - ("aslref2anat_xfm", "transforms"), - (("t1w_tpms", _pick_csf), "input_image"), + ('asl_mask', 'reference_image'), + ('aslref2anat_xfm', 'transforms'), + (('t1w_tpms', _pick_csf), 'input_image'), ]), ]) # fmt:skip warp_t1w_mask_to_aslref = pe.Node( ApplyTransforms( - interpolation="NearestNeighbor", + interpolation='NearestNeighbor', float=True, invert_transform_flags=[True], - args="-v", + args='-v', ), - name="warp_t1w_mask_to_aslref", + name='warp_t1w_mask_to_aslref', mem_gb=0.1, ) workflow.connect([ (inputnode, warp_t1w_mask_to_aslref, [ - ("asl_mask", "reference_image"), - ("aslref2anat_xfm", "transforms"), - ("t1w_mask", "input_image"), + ('asl_mask', 'reference_image'), + ('aslref2anat_xfm', 'transforms'), + ('t1w_mask', 'input_image'), ]), ]) # fmt:skip template_brain_mask = str( - get_template("MNI152NLin2009cAsym", resolution=2, desc="brain", suffix="mask") + get_template('MNI152NLin2009cAsym', resolution=2, desc='brain', suffix='mask') ) - aslref2mni152nlin2009casym = pe.Node(niu.Merge(2), name="aslref2mni152nlin2009casym") + aslref2mni152nlin2009casym = pe.Node(niu.Merge(2), name='aslref2mni152nlin2009casym') workflow.connect([ (inputnode, aslref2mni152nlin2009casym, [ - ("aslref2anat_xfm", "in1"), - 
("anat2mni2009c_xfm", "in2"), + ('aslref2anat_xfm', 'in1'), + ('anat2mni2009c_xfm', 'in2'), ]), ]) # fmt:skip warp_asl_mask_to_mni152nlin2009casym = pe.Node( ApplyTransforms( - interpolation="NearestNeighbor", + interpolation='NearestNeighbor', float=True, reference_image=template_brain_mask, - args="-v", + args='-v', ), - name="warp_asl_mask_to_mni152nlin2009casym", + name='warp_asl_mask_to_mni152nlin2009casym', mem_gb=0.1, ) workflow.connect([ - (inputnode, warp_asl_mask_to_mni152nlin2009casym, [("asl_mask", "input_image")]), + (inputnode, warp_asl_mask_to_mni152nlin2009casym, [('asl_mask', 'input_image')]), (aslref2mni152nlin2009casym, warp_asl_mask_to_mni152nlin2009casym, [ - ("out", "transforms"), + ('out', 'transforms'), ]) ]) # fmt:skip @@ -654,43 +654,43 @@ def _pick_csf(files): tpm_threshold=0.7, template_mask=template_brain_mask, ), - name="compute_qc_metrics", + name='compute_qc_metrics', run_without_submitting=True, mem_gb=0.2, ) workflow.connect([ - (warp_t1w_mask_to_aslref, compute_qc_metrics, [("output_image", "t1w_mask")]), + (warp_t1w_mask_to_aslref, compute_qc_metrics, [('output_image', 't1w_mask')]), (inputnode, compute_qc_metrics, [ - ("name_source", "name_source"), - ("asl_mask", "asl_mask"), - ("mean_cbf", "mean_cbf"), - ("confounds_file", "confounds_file"), - ("rmsd_file", "rmsd_file"), + ('name_source', 'name_source'), + ('asl_mask', 'asl_mask'), + ('mean_cbf', 'mean_cbf'), + ('confounds_file', 'confounds_file'), + ('rmsd_file', 'rmsd_file'), ]), (warp_asl_mask_to_mni152nlin2009casym, compute_qc_metrics, [ - ("output_image", "asl_mask_std"), + ('output_image', 'asl_mask_std'), ]), - (gm_tfm, compute_qc_metrics, [("output_image", "gm_tpm")]), - (wm_tfm, compute_qc_metrics, [("output_image", "wm_tpm")]), - (csf_tfm, compute_qc_metrics, [("output_image", "csf_tpm")]), - (compute_qc_metrics, outputnode, [("qc_file", "qc_file")]), + (gm_tfm, compute_qc_metrics, [('output_image', 'gm_tpm')]), + (wm_tfm, compute_qc_metrics, [('output_image', 'wm_tpm')]), + (csf_tfm, compute_qc_metrics, [('output_image', 'csf_tpm')]), + (compute_qc_metrics, outputnode, [('qc_file', 'qc_file')]), ]) # fmt:skip ds_qc = pe.Node( DerivativesDataSink( base_directory=config.execution.aslprep_dir, - desc="qualitycontrol", - suffix="cbf", + desc='qualitycontrol', + suffix='cbf', compress=False, ), - name="ds_qc", + name='ds_qc', run_without_submitting=True, ) workflow.connect([ - (inputnode, ds_qc, [("name_source", "source_file")]), - (compute_qc_metrics, ds_qc, [("qc_file", "in_file")]), + (inputnode, ds_qc, [('name_source', 'source_file')]), + (compute_qc_metrics, ds_qc, [('qc_file', 'in_file')]), ]) # fmt:skip ds_qc_metadata = pe.Node( @@ -698,32 +698,32 @@ def _pick_csf(files): base_directory=config.execution.aslprep_dir, dismiss_entities=list(DerivativesDataSink._allowed_entities), allowed_entities=[], - desc="qualitycontrol", - suffix="cbf", - extension=".json", + desc='qualitycontrol', + suffix='cbf', + extension='.json', ), - name="ds_qc_metadata", + name='ds_qc_metadata', run_without_submitting=True, ) workflow.connect([ - (inputnode, ds_qc_metadata, [("name_source", "source_file")]), - (compute_qc_metrics, ds_qc_metadata, [("qc_metadata", "in_file")]), + (inputnode, ds_qc_metadata, [('name_source', 'source_file')]), + (compute_qc_metrics, ds_qc_metadata, [('qc_metadata', 'in_file')]), ]) # fmt:skip if scorescrub: workflow.connect([ (inputnode, compute_qc_metrics, [ - ("mean_cbf_scrub", "mean_cbf_scrub"), - ("mean_cbf_score", "mean_cbf_score"), + ('mean_cbf_scrub', 'mean_cbf_scrub'), + 
('mean_cbf_score', 'mean_cbf_score'), ]), ]) # fmt:skip if basil: workflow.connect([ (inputnode, compute_qc_metrics, [ - ("mean_cbf_basil", "mean_cbf_basil"), - ("mean_cbf_gm_basil", "mean_cbf_gm_basil"), + ('mean_cbf_basil', 'mean_cbf_basil'), + ('mean_cbf_gm_basil', 'mean_cbf_gm_basil'), ]), ]) # fmt:skip @@ -741,8 +741,8 @@ def _binary_union(mask1, mask2): mskarr1 = np.asanyarray(img.dataobj, dtype=int) > 0 mskarr2 = np.asanyarray(nb.load(mask2).dataobj, dtype=int) > 0 out = img.__class__(mskarr1 | mskarr2, img.affine, img.header) - out.set_data_dtype("uint8") - out_name = Path("mask_union.nii.gz").absolute() + out.set_data_dtype('uint8') + out_name = Path('mask_union.nii.gz').absolute() out.to_filename(out_name) return str(out_name) diff --git a/aslprep/workflows/asl/fit.py b/aslprep/workflows/asl/fit.py index 1f891ef2e..2b11a6471 100644 --- a/aslprep/workflows/asl/fit.py +++ b/aslprep/workflows/asl/fit.py @@ -57,9 +57,9 @@ def get_sbrefs( asl_file: str, - entity_overrides: ty.Dict[str, ty.Any], + entity_overrides: dict[str, ty.Any], layout: bids.BIDSLayout, -) -> ty.List[str]: +) -> list[str]: """Find single-band reference(s) associated with ASL file. Parameters @@ -78,20 +78,20 @@ def get_sbrefs( sorted by EchoTime """ entities = extract_entities(asl_file) - entities.update(suffix="sbref", extension=[".nii", ".nii.gz"], **entity_overrides) + entities.update(suffix='sbref', extension=['.nii', '.nii.gz'], **entity_overrides) - return layout.get(return_type="file", **entities) + return layout.get(return_type='file', **entities) def init_asl_fit_wf( *, asl_file: str, - m0scan: ty.Union[str, None], + m0scan: str | None, use_ge: bool, precomputed: dict = {}, - fieldmap_id: ty.Optional[str] = None, + fieldmap_id: str | None = None, omp_nthreads: int = 1, - name: str = "asl_fit_wf", + name: str = 'asl_fit_wf', ) -> pe.Workflow: """Control the minimal estimation steps for functional preprocessing. @@ -199,16 +199,16 @@ def init_asl_fit_wf( # sbrefs aren't supported for ASL data, but I think that might change in the future. sbref_file = get_sbrefs( asl_file, - entity_overrides=config.execution.get().get("bids_filters", {}).get("sbref", {}), + entity_overrides=config.execution.get().get('bids_filters', {}).get('sbref', {}), layout=layout, ) basename = os.path.basename(asl_file) - sbref_msg = f"No single-band-reference found for {basename}." - if sbref_file and "sbref" in config.workflow.ignore: - sbref_msg = f"Single-band reference file(s) found for {basename} and ignored." + sbref_msg = f'No single-band-reference found for {basename}.' + if sbref_file and 'sbref' in config.workflow.ignore: + sbref_msg = f'Single-band reference file(s) found for {basename} and ignored.' sbref_file = [] elif sbref_file: - sbref_msg = f"Using single-band reference file(s) {os.path.basename(sbref_file)}." + sbref_msg = f'Using single-band reference file(s) {os.path.basename(sbref_file)}.' config.loggers.workflow.info(sbref_msg) # Get metadata from ASL file(s) @@ -216,52 +216,52 @@ def init_asl_fit_wf( # Patch RepetitionTimePreparation into RepetitionTime, # for the sake of BOLD-based interfaces and workflows. # This value shouldn't be used for anything except figures and reportlets. 
- metadata["RepetitionTime"] = metadata.get( - "RepetitionTime", - np.mean(metadata["RepetitionTimePreparation"]), + metadata['RepetitionTime'] = metadata.get( + 'RepetitionTime', + np.mean(metadata['RepetitionTimePreparation']), ) - orientation = "".join(nb.aff2axcodes(nb.load(asl_file).affine)) + orientation = ''.join(nb.aff2axcodes(nb.load(asl_file).affine)) _, mem_gb = estimate_asl_mem_usage(asl_file) - have_hmcref = "hmc_aslref" in precomputed - have_coregref = "coreg_aslref" in precomputed + have_hmcref = 'hmc_aslref' in precomputed + have_coregref = 'coreg_aslref' in precomputed # Can contain # 1) aslref2fmap # 2) aslref2anat # 3) hmc - transforms = precomputed.get("transforms", {}) - hmc_xforms = transforms.get("hmc") - aslref2fmap_xform = transforms.get("aslref2fmap") - aslref2anat_xform = transforms.get("aslref2anat") + transforms = precomputed.get('transforms', {}) + hmc_xforms = transforms.get('hmc') + aslref2fmap_xform = transforms.get('aslref2fmap') + aslref2anat_xform = transforms.get('aslref2anat') workflow = Workflow(name=name) inputnode = pe.Node( niu.IdentityInterface( fields=[ - "asl_file", - "aslcontext", + 'asl_file', + 'aslcontext', # Fieldmap registration - "fmap", - "fmap_ref", - "fmap_coeff", - "fmap_mask", - "fmap_id", - "sdc_method", + 'fmap', + 'fmap_ref', + 'fmap_coeff', + 'fmap_mask', + 'fmap_id', + 'sdc_method', # Anatomical coregistration - "t1w_preproc", - "t1w_mask", - "t1w_dseg", - "subjects_dir", - "subject_id", - "fsnative2t1w_xfm", + 't1w_preproc', + 't1w_mask', + 't1w_dseg', + 'subjects_dir', + 'subject_id', + 'fsnative2t1w_xfm', # Other things - "dummy_scans", + 'dummy_scans', ], ), - name="inputnode", + name='inputnode', ) inputnode.inputs.asl_file = asl_file inputnode.inputs.dummy_scans = config.workflow.dummy_scans @@ -269,166 +269,166 @@ def init_asl_fit_wf( outputnode = pe.Node( niu.IdentityInterface( fields=[ - "dummy_scans", - "hmc_aslref", - "coreg_aslref", - "asl_mask", - "motion_xfm", - "aslref2anat_xfm", - "aslref2fmap_xfm", - "movpar_file", # motion parameters file, for confounds and plots - "rmsd_file", + 'dummy_scans', + 'hmc_aslref', + 'coreg_aslref', + 'asl_mask', + 'motion_xfm', + 'aslref2anat_xfm', + 'aslref2fmap_xfm', + 'movpar_file', # motion parameters file, for confounds and plots + 'rmsd_file', ], ), - name="outputnode", + name='outputnode', ) # If all derivatives exist, inputnode could go unconnected, so add explicitly workflow.add_nodes([inputnode]) hmcref_buffer = pe.Node( - niu.IdentityInterface(fields=["aslref", "asl_file", "dummy_scans"]), - name="hmcref_buffer", + niu.IdentityInterface(fields=['aslref', 'asl_file', 'dummy_scans']), + name='hmcref_buffer', ) - fmapref_buffer = pe.Node(niu.Function(function=_select_ref), name="fmapref_buffer") - hmc_buffer = pe.Node(niu.IdentityInterface(fields=["hmc_xforms"]), name="hmc_buffer") + fmapref_buffer = pe.Node(niu.Function(function=_select_ref), name='fmapref_buffer') + hmc_buffer = pe.Node(niu.IdentityInterface(fields=['hmc_xforms']), name='hmc_buffer') fmapreg_buffer = pe.Node( - niu.IdentityInterface(fields=["aslref2fmap_xfm"]), - name="fmapreg_buffer", + niu.IdentityInterface(fields=['aslref2fmap_xfm']), + name='fmapreg_buffer', ) regref_buffer = pe.Node( - niu.IdentityInterface(fields=["aslref", "aslmask"]), - name="regref_buffer", + niu.IdentityInterface(fields=['aslref', 'aslmask']), + name='regref_buffer', ) summary = pe.Node( FunctionalSummary( - distortion_correction="None", # Can override with connection - registration=("FSL", 
"FreeSurfer")[config.workflow.run_reconall], + distortion_correction='None', # Can override with connection + registration=('FSL', 'FreeSurfer')[config.workflow.run_reconall], registration_dof=config.workflow.asl2t1w_dof, registration_init=config.workflow.asl2t1w_init, - pe_direction=metadata.get("PhaseEncodingDirection"), - tr=metadata["RepetitionTime"], + pe_direction=metadata.get('PhaseEncodingDirection'), + tr=metadata['RepetitionTime'], orientation=orientation, ), - name="summary", + name='summary', mem_gb=config.DEFAULT_MEMORY_MIN_GB, run_without_submitting=True, ) # workflow.connect([(inputnode, summary, [("dummy_scans", "dummy_scans")])]) asl_fit_reports_wf = init_asl_fit_reports_wf( - sdc_correction=not (fieldmap_id is None), + sdc_correction=fieldmap_id is not None, freesurfer=config.workflow.run_reconall, output_dir=config.execution.aslprep_dir, ) workflow.connect([ # XXX: Was from hmc_aslref_wf - (inputnode, hmcref_buffer, [("dummy_scans", "dummy_scans")]), + (inputnode, hmcref_buffer, [('dummy_scans', 'dummy_scans')]), (hmcref_buffer, outputnode, [ - ("aslref", "hmc_aslref"), - ("dummy_scans", "dummy_scans"), + ('aslref', 'hmc_aslref'), + ('dummy_scans', 'dummy_scans'), ]), (regref_buffer, outputnode, [ - ("aslref", "coreg_aslref"), - ("aslmask", "asl_mask"), + ('aslref', 'coreg_aslref'), + ('aslmask', 'asl_mask'), ]), - (fmapreg_buffer, outputnode, [("aslref2fmap_xfm", "aslref2fmap_xfm")]), + (fmapreg_buffer, outputnode, [('aslref2fmap_xfm', 'aslref2fmap_xfm')]), (hmc_buffer, outputnode, [ - ("hmc_xforms", "motion_xfm"), - ("movpar_file", "movpar_file"), - ("rmsd_file", "rmsd_file"), + ('hmc_xforms', 'motion_xfm'), + ('movpar_file', 'movpar_file'), + ('rmsd_file', 'rmsd_file'), ]), (inputnode, asl_fit_reports_wf, [ - ("asl_file", "inputnode.source_file"), - ("t1w_preproc", "inputnode.t1w_preproc"), + ('asl_file', 'inputnode.source_file'), + ('t1w_preproc', 'inputnode.t1w_preproc'), # May not need all of these - ("t1w_mask", "inputnode.t1w_mask"), - ("t1w_dseg", "inputnode.t1w_dseg"), - ("subjects_dir", "inputnode.subjects_dir"), - ("subject_id", "inputnode.subject_id"), + ('t1w_mask', 'inputnode.t1w_mask'), + ('t1w_dseg', 'inputnode.t1w_dseg'), + ('subjects_dir', 'inputnode.subjects_dir'), + ('subject_id', 'inputnode.subject_id'), ]), (outputnode, asl_fit_reports_wf, [ - ("coreg_aslref", "inputnode.coreg_aslref"), - ("aslref2anat_xfm", "inputnode.aslref2anat_xfm"), + ('coreg_aslref', 'inputnode.coreg_aslref'), + ('aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), ]), - (summary, asl_fit_reports_wf, [("out_report", "inputnode.summary_report")]), + (summary, asl_fit_reports_wf, [('out_report', 'inputnode.summary_report')]), ]) # fmt:skip # Stage 1: Generate motion correction aslref if not have_hmcref: - config.loggers.workflow.info("Stage 1: Adding HMC aslref workflow") + config.loggers.workflow.info('Stage 1: Adding HMC aslref workflow') hmc_aslref_wf = init_raw_aslref_wf( - name="hmc_aslref_wf", + name='hmc_aslref_wf', asl_file=asl_file, - m0scan=(metadata["M0Type"] == "Separate"), + m0scan=(metadata['M0Type'] == 'Separate'), use_ge=use_ge, ) hmc_aslref_wf.inputs.inputnode.m0scan = m0scan hmc_aslref_wf.inputs.inputnode.dummy_scans = config.workflow.dummy_scans - workflow.connect([(inputnode, hmc_aslref_wf, [("aslcontext", "inputnode.aslcontext")])]) + workflow.connect([(inputnode, hmc_aslref_wf, [('aslcontext', 'inputnode.aslcontext')])]) ds_hmc_aslref_wf = init_ds_aslref_wf( bids_root=layout.root, output_dir=config.execution.aslprep_dir, - desc="hmc", - 
name="ds_hmc_aslref_wf", + desc='hmc', + name='ds_hmc_aslref_wf', ) ds_hmc_aslref_wf.inputs.inputnode.source_files = [asl_file] workflow.connect([ (hmc_aslref_wf, hmcref_buffer, [ - ("outputnode.asl_file", "asl_file"), - ("outputnode.aslref", "aslref"), + ('outputnode.asl_file', 'asl_file'), + ('outputnode.aslref', 'aslref'), ]), - (hmcref_buffer, ds_hmc_aslref_wf, [("aslref", "inputnode.aslref")]), + (hmcref_buffer, ds_hmc_aslref_wf, [('aslref', 'inputnode.aslref')]), (hmc_aslref_wf, asl_fit_reports_wf, [ - ("outputnode.validation_report", "inputnode.validation_report"), + ('outputnode.validation_report', 'inputnode.validation_report'), ]), ]) # fmt:skip else: - config.loggers.workflow.info("Found HMC aslref - skipping Stage 1") + config.loggers.workflow.info('Found HMC aslref - skipping Stage 1') - validate_asl = pe.Node(ValidateImage(), name="validate_asl") + validate_asl = pe.Node(ValidateImage(), name='validate_asl') validate_asl.inputs.in_file = asl_file - hmcref_buffer.inputs.aslref = precomputed["hmc_aslref"] + hmcref_buffer.inputs.aslref = precomputed['hmc_aslref'] workflow.connect([ - (validate_asl, hmcref_buffer, [("out_file", "asl_file")]), - (validate_asl, asl_fit_reports_wf, [("out_report", "inputnode.validation_report")]), + (validate_asl, hmcref_buffer, [('out_file', 'asl_file')]), + (validate_asl, asl_fit_reports_wf, [('out_report', 'inputnode.validation_report')]), ]) # fmt:skip # Reduce the ASL series to only include volumes that need to be processed. processing_target = pe.Node( niu.Function( function=select_processing_target, - input_names=["aslcontext"], - output_names=["processing_target"], + input_names=['aslcontext'], + output_names=['processing_target'], ), - name="processing_target", + name='processing_target', ) reduce_asl_file = pe.Node( ReduceASLFiles(metadata=metadata), - name="reduce_asl_file", + name='reduce_asl_file', ) workflow.connect([ - (inputnode, processing_target, [("aslcontext", "aslcontext")]), - (inputnode, reduce_asl_file, [("aslcontext", "aslcontext")]), - (processing_target, reduce_asl_file, [("processing_target", "processing_target")]), - (hmcref_buffer, reduce_asl_file, [("asl_file", "asl_file")]), + (inputnode, processing_target, [('aslcontext', 'aslcontext')]), + (inputnode, reduce_asl_file, [('aslcontext', 'aslcontext')]), + (processing_target, reduce_asl_file, [('processing_target', 'processing_target')]), + (hmcref_buffer, reduce_asl_file, [('asl_file', 'asl_file')]), ]) # fmt:skip # Stage 2: Estimate head motion if not hmc_xforms: - config.loggers.workflow.info("Stage 2: Adding motion correction workflow") + config.loggers.workflow.info('Stage 2: Adding motion correction workflow') asl_hmc_wf = init_asl_hmc_wf( - name="asl_hmc_wf", - mem_gb=mem_gb["filesize"], + name='asl_hmc_wf', + mem_gb=mem_gb['filesize'], omp_nthreads=omp_nthreads, ) @@ -438,75 +438,75 @@ def init_asl_fit_wf( output_dir=config.execution.aslprep_dir, ) - ds_hmc_wf.get_node("inputnode").inputs.source_files = [asl_file] + ds_hmc_wf.get_node('inputnode').inputs.source_files = [asl_file] # fMRIPrep will write out an orig-to-boldref transform to anat, so we need to overwrite # some fields. 
- ds_hmc_wf.get_node("ds_xforms").inputs.datatype = "perf" - ds_hmc_wf.get_node("ds_xforms").inputs.to = "aslref" + ds_hmc_wf.get_node('ds_xforms').inputs.datatype = 'perf' + ds_hmc_wf.get_node('ds_xforms').inputs.to = 'aslref' workflow.connect([ - (hmcref_buffer, asl_hmc_wf, [("aslref", "inputnode.raw_ref_image")]), + (hmcref_buffer, asl_hmc_wf, [('aslref', 'inputnode.raw_ref_image')]), (reduce_asl_file, asl_hmc_wf, [ - ("asl_file", "inputnode.asl_file"), - ("aslcontext", "inputnode.aslcontext"), + ('asl_file', 'inputnode.asl_file'), + ('aslcontext', 'inputnode.aslcontext'), ]), - (asl_hmc_wf, ds_hmc_wf, [("outputnode.xforms", "inputnode.xforms")]), + (asl_hmc_wf, ds_hmc_wf, [('outputnode.xforms', 'inputnode.xforms')]), (asl_hmc_wf, hmc_buffer, [ - ("outputnode.xforms", "hmc_xforms"), - ("outputnode.movpar_file", "movpar_file"), - ("outputnode.rmsd_file", "rmsd_file"), + ('outputnode.xforms', 'hmc_xforms'), + ('outputnode.movpar_file', 'movpar_file'), + ('outputnode.rmsd_file', 'rmsd_file'), ]), ]) # fmt:skip else: - config.loggers.workflow.info("Found motion correction transforms - skipping Stage 2") + config.loggers.workflow.info('Found motion correction transforms - skipping Stage 2') hmc_buffer.inputs.hmc_xforms = hmc_xforms # Stage 3: Create coregistration reference # Fieldmap correction only happens during fit if this stage is needed if not have_coregref: - config.loggers.workflow.info("Stage 3: Adding coregistration aslref workflow") + config.loggers.workflow.info('Stage 3: Adding coregistration aslref workflow') # Select initial aslref, enhance contrast, and generate mask fmapref_buffer.inputs.sbref_file = sbref_file enhance_aslref_wf = init_enhance_and_skullstrip_bold_wf( pre_mask=False, - name="enhance_aslref_wf", + name='enhance_aslref_wf', ) ds_coreg_aslref_wf = init_ds_aslref_wf( bids_root=layout.root, output_dir=config.execution.aslprep_dir, - desc="coreg", - name="ds_coreg_aslref_wf", + desc='coreg', + name='ds_coreg_aslref_wf', ) workflow.connect([ - (hmcref_buffer, fmapref_buffer, [("aslref", "aslref_files")]), - (fmapref_buffer, enhance_aslref_wf, [("out", "inputnode.in_file")]), - (fmapref_buffer, ds_coreg_aslref_wf, [("out", "inputnode.source_files")]), - (ds_coreg_aslref_wf, regref_buffer, [("outputnode.aslref", "aslref")]), - (fmapref_buffer, asl_fit_reports_wf, [("out", "inputnode.sdc_aslref")]), + (hmcref_buffer, fmapref_buffer, [('aslref', 'aslref_files')]), + (fmapref_buffer, enhance_aslref_wf, [('out', 'inputnode.in_file')]), + (fmapref_buffer, ds_coreg_aslref_wf, [('out', 'inputnode.source_files')]), + (ds_coreg_aslref_wf, regref_buffer, [('outputnode.aslref', 'aslref')]), + (fmapref_buffer, asl_fit_reports_wf, [('out', 'inputnode.sdc_aslref')]), ]) # fmt:skip if fieldmap_id: fmap_select = pe.Node( KeySelect( - fields=["fmap_ref", "fmap_coeff", "fmap_mask", "sdc_method"], + fields=['fmap_ref', 'fmap_coeff', 'fmap_mask', 'sdc_method'], key=fieldmap_id, ), - name="fmap_select", + name='fmap_select', run_without_submitting=True, ) if not aslref2fmap_xform: fmapreg_wf = init_coeff2epi_wf( - debug="fieldmaps" in config.execution.debug, + debug='fieldmaps' in config.execution.debug, omp_nthreads=config.nipype.omp_nthreads, sloppy=config.execution.sloppy, - name="fmapreg_wf", + name='fmapreg_wf', ) - itk_mat2txt = pe.Node(ConcatenateXFMs(out_fmt="itk"), name="itk_mat2txt") + itk_mat2txt = pe.Node(ConcatenateXFMs(out_fmt='itk'), name='itk_mat2txt') # fMRIPrep's init_ds_registration_wf will write out the ASL xfms to `anat` for # some reason, so we must override it. 
@@ -514,73 +514,73 @@ def init_asl_fit_wf( ds_fmapreg_wf = output_workflows.init_ds_registration_wf( bids_root=layout.root, output_dir=config.execution.aslprep_dir, - source="aslref", - dest=fieldmap_id.replace("_", ""), - name="ds_fmapreg_wf", + source='aslref', + dest=fieldmap_id.replace('_', ''), + name='ds_fmapreg_wf', ) workflow.connect([ (enhance_aslref_wf, fmapreg_wf, [ - ("outputnode.bias_corrected_file", "inputnode.target_ref"), - ("outputnode.mask_file", "inputnode.target_mask"), + ('outputnode.bias_corrected_file', 'inputnode.target_ref'), + ('outputnode.mask_file', 'inputnode.target_mask'), ]), (fmap_select, fmapreg_wf, [ - ("fmap_ref", "inputnode.fmap_ref"), - ("fmap_mask", "inputnode.fmap_mask"), + ('fmap_ref', 'inputnode.fmap_ref'), + ('fmap_mask', 'inputnode.fmap_mask'), ]), - (fmapreg_wf, itk_mat2txt, [("outputnode.target2fmap_xfm", "in_xfms")]), - (itk_mat2txt, ds_fmapreg_wf, [("out_xfm", "inputnode.xform")]), - (fmapref_buffer, ds_fmapreg_wf, [("out", "inputnode.source_files")]), - (ds_fmapreg_wf, fmapreg_buffer, [("outputnode.xform", "aslref2fmap_xfm")]), + (fmapreg_wf, itk_mat2txt, [('outputnode.target2fmap_xfm', 'in_xfms')]), + (itk_mat2txt, ds_fmapreg_wf, [('out_xfm', 'inputnode.xform')]), + (fmapref_buffer, ds_fmapreg_wf, [('out', 'inputnode.source_files')]), + (ds_fmapreg_wf, fmapreg_buffer, [('outputnode.xform', 'aslref2fmap_xfm')]), ]) # fmt:skip else: fmapreg_buffer.inputs.aslref2fmap_xfm = aslref2fmap_xform unwarp_wf = init_unwarp_wf( free_mem=config.environment.free_mem, - debug="fieldmaps" in config.execution.debug, + debug='fieldmaps' in config.execution.debug, omp_nthreads=config.nipype.omp_nthreads, ) unwarp_wf.inputs.inputnode.metadata = layout.get_metadata(asl_file) workflow.connect([ (inputnode, fmap_select, [ - ("fmap_ref", "fmap_ref"), - ("fmap_coeff", "fmap_coeff"), - ("fmap_mask", "fmap_mask"), - ("sdc_method", "sdc_method"), - ("fmap_id", "keys"), + ('fmap_ref', 'fmap_ref'), + ('fmap_coeff', 'fmap_coeff'), + ('fmap_mask', 'fmap_mask'), + ('sdc_method', 'sdc_method'), + ('fmap_id', 'keys'), ]), - (fmap_select, unwarp_wf, [("fmap_coeff", "inputnode.fmap_coeff")]), + (fmap_select, unwarp_wf, [('fmap_coeff', 'inputnode.fmap_coeff')]), (fmapreg_buffer, unwarp_wf, [ # This looks backwards, but unwarp_wf describes transforms in # terms of points while we (and init_coeff2epi_wf) describe them # in terms of images. Mapping fieldmap coordinates into aslref # coordinates maps the aslref image onto the fieldmap image. 
- ("aslref2fmap_xfm", "inputnode.fmap2data_xfm"), + ('aslref2fmap_xfm', 'inputnode.fmap2data_xfm'), ]), (enhance_aslref_wf, unwarp_wf, [ - ("outputnode.bias_corrected_file", "inputnode.distorted"), + ('outputnode.bias_corrected_file', 'inputnode.distorted'), ]), - (unwarp_wf, ds_coreg_aslref_wf, [("outputnode.corrected", "inputnode.aslref")]), - (unwarp_wf, regref_buffer, [("outputnode.corrected_mask", "aslmask")]), - (fmap_select, asl_fit_reports_wf, [("fmap_ref", "inputnode.fmap_ref")]), - (fmap_select, summary, [("sdc_method", "distortion_correction")]), + (unwarp_wf, ds_coreg_aslref_wf, [('outputnode.corrected', 'inputnode.aslref')]), + (unwarp_wf, regref_buffer, [('outputnode.corrected_mask', 'aslmask')]), + (fmap_select, asl_fit_reports_wf, [('fmap_ref', 'inputnode.fmap_ref')]), + (fmap_select, summary, [('sdc_method', 'distortion_correction')]), (fmapreg_buffer, asl_fit_reports_wf, [ - ("aslref2fmap_xfm", "inputnode.aslref2fmap_xfm"), + ('aslref2fmap_xfm', 'inputnode.aslref2fmap_xfm'), ]), - (unwarp_wf, asl_fit_reports_wf, [("outputnode.fieldmap", "inputnode.fieldmap")]), + (unwarp_wf, asl_fit_reports_wf, [('outputnode.fieldmap', 'inputnode.fieldmap')]), ]) # fmt:skip else: workflow.connect([ (enhance_aslref_wf, ds_coreg_aslref_wf, [ - ("outputnode.bias_corrected_file", "inputnode.aslref"), + ('outputnode.bias_corrected_file', 'inputnode.aslref'), ]), - (enhance_aslref_wf, regref_buffer, [("outputnode.mask_file", "aslmask")]), + (enhance_aslref_wf, regref_buffer, [('outputnode.mask_file', 'aslmask')]), ]) # fmt:skip else: - config.loggers.workflow.info("Found coregistration reference - skipping Stage 3") - regref_buffer.inputs.aslref = precomputed["coreg_aslref"] + config.loggers.workflow.info('Found coregistration reference - skipping Stage 3') + regref_buffer.inputs.aslref = precomputed['coreg_aslref'] if not aslref2anat_xform: # calculate ASL registration to T1w @@ -588,8 +588,8 @@ def init_asl_fit_wf( bold2t1w_dof=config.workflow.asl2t1w_dof, bold2t1w_init=config.workflow.asl2t1w_init, freesurfer=config.workflow.run_reconall, - mem_gb=mem_gb["resampled"], - name="asl_reg_wf", + mem_gb=mem_gb['resampled'], + name='asl_reg_wf', omp_nthreads=omp_nthreads, sloppy=config.execution.sloppy, use_bbr=config.workflow.use_bbr, @@ -601,27 +601,27 @@ def init_asl_fit_wf( ds_aslreg_wf = output_workflows.init_ds_registration_wf( bids_root=layout.root, output_dir=config.execution.aslprep_dir, - source="aslref", - dest="T1w", - name="ds_aslreg_wf", + source='aslref', + dest='T1w', + name='ds_aslreg_wf', ) workflow.connect([ (inputnode, asl_reg_wf, [ - ("t1w_preproc", "inputnode.t1w_preproc"), - ("t1w_mask", "inputnode.t1w_mask"), - ("t1w_dseg", "inputnode.t1w_dseg"), + ('t1w_preproc', 'inputnode.t1w_preproc'), + ('t1w_mask', 'inputnode.t1w_mask'), + ('t1w_dseg', 'inputnode.t1w_dseg'), # Undefined if --fs-no-reconall, but this is safe - ("subjects_dir", "inputnode.subjects_dir"), - ("subject_id", "inputnode.subject_id"), - ("fsnative2t1w_xfm", "inputnode.fsnative2t1w_xfm"), + ('subjects_dir', 'inputnode.subjects_dir'), + ('subject_id', 'inputnode.subject_id'), + ('fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm'), ]), - (regref_buffer, asl_reg_wf, [("aslref", "inputnode.ref_bold_brain")]), + (regref_buffer, asl_reg_wf, [('aslref', 'inputnode.ref_bold_brain')]), # Incomplete sources - (regref_buffer, ds_aslreg_wf, [("aslref", "inputnode.source_files")]), - (asl_reg_wf, ds_aslreg_wf, [("outputnode.itk_bold_to_t1", "inputnode.xform")]), - (ds_aslreg_wf, outputnode, [("outputnode.xform", 
"aslref2anat_xfm")]), - (asl_reg_wf, summary, [("outputnode.fallback", "fallback")]), + (regref_buffer, ds_aslreg_wf, [('aslref', 'inputnode.source_files')]), + (asl_reg_wf, ds_aslreg_wf, [('outputnode.itk_bold_to_t1', 'inputnode.xform')]), + (ds_aslreg_wf, outputnode, [('outputnode.xform', 'aslref2anat_xfm')]), + (asl_reg_wf, summary, [('outputnode.fallback', 'fallback')]), ]) # fmt:skip else: outputnode.inputs.aslref2anat_xfm = aslref2anat_xform @@ -632,10 +632,10 @@ def init_asl_fit_wf( def init_asl_native_wf( *, asl_file: str, - m0scan: ty.Optional[str] = None, - fieldmap_id: ty.Optional[str] = None, + m0scan: str | None = None, + fieldmap_id: str | None = None, omp_nthreads: int = 1, - name: str = "asl_native_wf", + name: str = 'asl_native_wf', ) -> pe.Workflow: r"""Apply minimal resampling workflow. @@ -719,140 +719,140 @@ def init_asl_native_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "aslcontext", + 'aslcontext', # ASL fit - "aslref", - "asl_mask", - "m0scan", - "motion_xfm", - "aslref2fmap_xfm", - "dummy_scans", + 'aslref', + 'asl_mask', + 'm0scan', + 'motion_xfm', + 'aslref2fmap_xfm', + 'dummy_scans', # Fieldmap fit - "fmap_ref", - "fmap_coeff", - "fmap_id", + 'fmap_ref', + 'fmap_coeff', + 'fmap_id', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "asl_minimal", - "asl_native", - "m0scan_native", - "aslcontext", - "metadata", + 'asl_minimal', + 'asl_native', + 'm0scan_native', + 'aslcontext', + 'metadata', # Transforms - "motion_xfm", + 'motion_xfm', ], ), - name="outputnode", + name='outputnode', ) aslbuffer = pe.Node( - niu.IdentityInterface(fields=["asl_file", "ro_time", "pe_dir"]), - name="aslbuffer", + niu.IdentityInterface(fields=['asl_file', 'ro_time', 'pe_dir']), + name='aslbuffer', ) # Validate the ASL file - validate_asl = pe.Node(ValidateImage(in_file=asl_file), name="validate_asl") + validate_asl = pe.Node(ValidateImage(in_file=asl_file), name='validate_asl') # Drop volumes in the ASL file that won't be used # (e.g., precalculated CBF volumes if control-label pairs are available). 
processing_target = pe.Node( niu.Function( function=select_processing_target, - input_names=["aslcontext"], - output_names=["processing_target"], + input_names=['aslcontext'], + output_names=['processing_target'], ), - name="processing_target", + name='processing_target', ) reduce_asl_file = pe.Node( ReduceASLFiles(metadata=metadata), - name="reduce_asl_file", + name='reduce_asl_file', ) workflow.connect([ - (inputnode, processing_target, [("aslcontext", "aslcontext")]), - (processing_target, reduce_asl_file, [("processing_target", "processing_target")]), - (inputnode, reduce_asl_file, [("aslcontext", "aslcontext")]), - (validate_asl, reduce_asl_file, [("out_file", "asl_file")]), - (reduce_asl_file, aslbuffer, [("asl_file", "asl_file")]), + (inputnode, processing_target, [('aslcontext', 'aslcontext')]), + (processing_target, reduce_asl_file, [('processing_target', 'processing_target')]), + (inputnode, reduce_asl_file, [('aslcontext', 'aslcontext')]), + (validate_asl, reduce_asl_file, [('out_file', 'asl_file')]), + (reduce_asl_file, aslbuffer, [('asl_file', 'asl_file')]), (reduce_asl_file, outputnode, [ - ("aslcontext", "aslcontext"), - ("metadata", "metadata"), + ('aslcontext', 'aslcontext'), + ('metadata', 'metadata'), ]), ]) # fmt:skip # Prepare fieldmap metadata if fieldmap_id: fmap_select = pe.Node( - KeySelect(fields=["fmap_ref", "fmap_coeff"], key=fieldmap_id), - name="fmap_select", + KeySelect(fields=['fmap_ref', 'fmap_coeff'], key=fieldmap_id), + name='fmap_select', run_without_submitting=True, ) distortion_params = pe.Node( DistortionParameters(), - name="distortion_params", + name='distortion_params', run_without_submitting=True, ) workflow.connect([ (inputnode, fmap_select, [ - ("fmap_ref", "fmap_ref"), - ("fmap_coeff", "fmap_coeff"), - ("fmap_id", "keys"), + ('fmap_ref', 'fmap_ref'), + ('fmap_coeff', 'fmap_coeff'), + ('fmap_id', 'keys'), ]), (reduce_asl_file, distortion_params, [ - ("metadata", "metadata"), - ("asl_file", "in_file"), + ('metadata', 'metadata'), + ('asl_file', 'in_file'), ]), (distortion_params, aslbuffer, [ - ("readout_time", "ro_time"), - ("pe_direction", "pe_dir"), + ('readout_time', 'ro_time'), + ('pe_direction', 'pe_dir'), ]), ]) # fmt:skip # Resample ASL to aslref aslref_asl = pe.Node( - ResampleSeries(jacobian="fmap-jacobian" not in config.workflow.ignore), - name="aslref_asl", + ResampleSeries(jacobian='fmap-jacobian' not in config.workflow.ignore), + name='aslref_asl', n_procs=omp_nthreads, - mem_gb=mem_gb["resampled"], + mem_gb=mem_gb['resampled'], ) workflow.connect([ (inputnode, aslref_asl, [ - ("aslref", "ref_file"), - ("motion_xfm", "transforms"), + ('aslref', 'ref_file'), + ('motion_xfm', 'transforms'), ]), (aslbuffer, aslref_asl, [ - ("asl_file", "in_file"), - ("ro_time", "ro_time"), - ("pe_dir", "pe_dir"), + ('asl_file', 'in_file'), + ('ro_time', 'ro_time'), + ('pe_dir', 'pe_dir'), ]), ]) # fmt:skip if fieldmap_id: - aslref_fmap = pe.Node(ReconstructFieldmap(inverse=[True]), name="aslref_fmap", mem_gb=1) + aslref_fmap = pe.Node(ReconstructFieldmap(inverse=[True]), name='aslref_fmap', mem_gb=1) workflow.connect([ (inputnode, aslref_fmap, [ - ("aslref", "target_ref_file"), - ("aslref2fmap_xfm", "transforms"), + ('aslref', 'target_ref_file'), + ('aslref2fmap_xfm', 'transforms'), ]), (fmap_select, aslref_fmap, [ - ("fmap_coeff", "in_coeffs"), - ("fmap_ref", "fmap_ref_file"), + ('fmap_coeff', 'in_coeffs'), + ('fmap_ref', 'fmap_ref_file'), ]), - (aslref_fmap, aslref_asl, [("out_file", "fieldmap")]), + (aslref_fmap, aslref_asl, [('out_file', 
'fieldmap')]), ]) # fmt:skip workflow.connect([ - (inputnode, outputnode, [("motion_xfm", "motion_xfm")]), - (aslbuffer, outputnode, [("asl_file", "asl_minimal")]), - (aslref_asl, outputnode, [("out_file", "asl_native")]), + (inputnode, outputnode, [('motion_xfm', 'motion_xfm')]), + (aslbuffer, outputnode, [('asl_file', 'asl_minimal')]), + (aslref_asl, outputnode, [('out_file', 'asl_native')]), ]) # fmt:skip if m0scan: @@ -860,24 +860,24 @@ def init_asl_native_wf( # Resample separate M0 file to aslref # No HMC - identity_xfm = nw_data.load("itkIdentityTransform.txt") + identity_xfm = nw_data.load('itkIdentityTransform.txt') aslref_m0scan = pe.Node( ResampleSeries( - jacobian="fmap-jacobian" not in config.workflow.ignore, + jacobian='fmap-jacobian' not in config.workflow.ignore, transforms=[identity_xfm], in_file=m0scan, ), - name="aslref_m0scan", + name='aslref_m0scan', n_procs=omp_nthreads, ) workflow.connect([ - (inputnode, aslref_m0scan, [("aslref", "ref_file")]), + (inputnode, aslref_m0scan, [('aslref', 'ref_file')]), (aslbuffer, aslref_m0scan, [ - ("ro_time", "ro_time"), - ("pe_dir", "pe_dir"), + ('ro_time', 'ro_time'), + ('pe_dir', 'pe_dir'), ]), - (aslref_m0scan, outputnode, [("out_file", "m0scan_native")]), + (aslref_m0scan, outputnode, [('out_file', 'm0scan_native')]), ]) # fmt:skip return workflow diff --git a/aslprep/workflows/asl/hmc.py b/aslprep/workflows/asl/hmc.py index 3f2390d40..1fc1415e7 100644 --- a/aslprep/workflows/asl/hmc.py +++ b/aslprep/workflows/asl/hmc.py @@ -20,7 +20,7 @@ def init_asl_hmc_wf( mem_gb, omp_nthreads, - name="asl_hmc_wf", + name='asl_hmc_wf', ): """Estimate head-motion parameters and optionally correct them for intensity differences. @@ -98,100 +98,100 @@ def init_asl_hmc_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "asl_file", - "aslcontext", - "processing_target", - "raw_ref_image", + 'asl_file', + 'aslcontext', + 'processing_target', + 'raw_ref_image', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "movpar_file", - "xforms", - "rmsd_file", + 'movpar_file', + 'xforms', + 'rmsd_file', ], ), - name="outputnode", + name='outputnode', ) split_by_volume_type = pe.Node( SplitByVolumeType(), - name="split_by_volume_type", + name='split_by_volume_type', ) workflow.connect([ (inputnode, split_by_volume_type, [ - ("aslcontext", "aslcontext"), - ("asl_file", "asl_file"), + ('aslcontext', 'aslcontext'), + ('asl_file', 'asl_file'), ]), ]) # fmt:skip mcflirt = pe.MapNode( fsl.MCFLIRT(save_mats=True, save_plots=True, save_rms=False), - name="mcflirt", + name='mcflirt', mem_gb=mem_gb * 3, - iterfield=["in_file"], + iterfield=['in_file'], ) workflow.connect([ - (inputnode, mcflirt, [("raw_ref_image", "ref_file")]), - (split_by_volume_type, mcflirt, [("out_files", "in_file")]), + (inputnode, mcflirt, [('raw_ref_image', 'ref_file')]), + (split_by_volume_type, mcflirt, [('out_files', 'in_file')]), ]) # fmt:skip listify_mat_files = pe.MapNode( niu.Function( function=listify, - input_names=["value"], - output_names=["lst"], + input_names=['value'], + output_names=['lst'], ), - name="listify_mat_files", - iterfield=["value"], + name='listify_mat_files', + iterfield=['value'], ) - workflow.connect([(mcflirt, listify_mat_files, [("mat_file", "value")])]) + workflow.connect([(mcflirt, listify_mat_files, [('mat_file', 'value')])]) # Combine the motpars files, mat files, and rms files across the different MCFLIRTed files, # based on the aslcontext file. 
combine_motpars = pe.Node( CombineMotionParameters(), - name="combine_motpars", + name='combine_motpars', ) workflow.connect([ - (inputnode, combine_motpars, [("aslcontext", "aslcontext")]), - (split_by_volume_type, combine_motpars, [("volume_types", "volume_types")]), - (mcflirt, combine_motpars, [("par_file", "par_files")]), - (listify_mat_files, combine_motpars, [("lst", "mat_files")]), + (inputnode, combine_motpars, [('aslcontext', 'aslcontext')]), + (split_by_volume_type, combine_motpars, [('volume_types', 'volume_types')]), + (mcflirt, combine_motpars, [('par_file', 'par_files')]), + (listify_mat_files, combine_motpars, [('lst', 'mat_files')]), ]) # fmt:skip # Use rmsdiff to calculate relative rms from transform files. - rmsdiff = pe.Node(PairwiseRMSDiff(), name="rmsdiff") + rmsdiff = pe.Node(PairwiseRMSDiff(), name='rmsdiff') workflow.connect([ - (inputnode, rmsdiff, [("raw_ref_image", "ref_file")]), - (combine_motpars, rmsdiff, [("mat_file_list", "in_files")]), - (rmsdiff, outputnode, [("out_file", "rmsd_file")]), + (inputnode, rmsdiff, [('raw_ref_image', 'ref_file')]), + (combine_motpars, rmsdiff, [('mat_file_list', 'in_files')]), + (rmsdiff, outputnode, [('out_file', 'rmsd_file')]), ]) # fmt:skip - fsl2itk = pe.Node(MCFLIRT2ITK(), name="fsl2itk", mem_gb=0.05, n_procs=omp_nthreads) + fsl2itk = pe.Node(MCFLIRT2ITK(), name='fsl2itk', mem_gb=0.05, n_procs=omp_nthreads) workflow.connect([ (inputnode, fsl2itk, [ - ("raw_ref_image", "in_source"), - ("raw_ref_image", "in_reference"), + ('raw_ref_image', 'in_source'), + ('raw_ref_image', 'in_reference'), ]), - (combine_motpars, fsl2itk, [("mat_file_list", "in_files")]), - (fsl2itk, outputnode, [("out_file", "xforms")]), + (combine_motpars, fsl2itk, [('mat_file_list', 'in_files')]), + (fsl2itk, outputnode, [('out_file', 'xforms')]), ]) # fmt:skip normalize_motion = pe.Node( - NormalizeMotionParams(format="FSL"), - name="normalize_motion", + NormalizeMotionParams(format='FSL'), + name='normalize_motion', mem_gb=DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ - (combine_motpars, normalize_motion, [("combined_par_file", "in_file")]), - (normalize_motion, outputnode, [("out_file", "movpar_file")]), + (combine_motpars, normalize_motion, [('combined_par_file', 'in_file')]), + (normalize_motion, outputnode, [('out_file', 'movpar_file')]), ]) # fmt:skip return workflow diff --git a/aslprep/workflows/asl/outputs.py b/aslprep/workflows/asl/outputs.py index 0be25749d..feac4c40b 100644 --- a/aslprep/workflows/asl/outputs.py +++ b/aslprep/workflows/asl/outputs.py @@ -1,7 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Workflows for writing out derivative files.""" -import typing as ty from nipype.interfaces import utility as niu from nipype.pipeline import engine as pe @@ -13,57 +12,57 @@ from aslprep.interfaces.bids import DerivativesDataSink BASE_INPUT_FIELDS = { - "asl": { - "desc": "preproc", - "suffix": "asl", + 'asl': { + 'desc': 'preproc', + 'suffix': 'asl', }, - "aslref": { - "suffix": "aslref", + 'aslref': { + 'suffix': 'aslref', }, - "asl_mask": { - "desc": "brain", - "suffix": "mask", + 'asl_mask': { + 'desc': 'brain', + 'suffix': 'mask', }, # CBF outputs - "cbf_ts": { - "desc": "timeseries", - "suffix": "cbf", + 'cbf_ts': { + 'desc': 'timeseries', + 'suffix': 'cbf', }, - "mean_cbf": { - "suffix": "cbf", + 'mean_cbf': { + 'suffix': 'cbf', }, - "att": { - "suffix": "att", + 'att': { + 'suffix': 'att', }, # SCORE/SCRUB outputs - "cbf_ts_score": { - "desc": 
"scoreTimeseries", - "suffix": "cbf", + 'cbf_ts_score': { + 'desc': 'scoreTimeseries', + 'suffix': 'cbf', }, - "mean_cbf_score": { - "desc": "score", - "suffix": "cbf", + 'mean_cbf_score': { + 'desc': 'score', + 'suffix': 'cbf', }, - "mean_cbf_scrub": { - "desc": "scrub", - "suffix": "cbf", + 'mean_cbf_scrub': { + 'desc': 'scrub', + 'suffix': 'cbf', }, # BASIL outputs - "mean_cbf_basil": { - "desc": "basil", - "suffix": "cbf", + 'mean_cbf_basil': { + 'desc': 'basil', + 'suffix': 'cbf', }, - "mean_cbf_gm_basil": { - "desc": "basilGM", - "suffix": "cbf", + 'mean_cbf_gm_basil': { + 'desc': 'basilGM', + 'suffix': 'cbf', }, - "mean_cbf_wm_basil": { - "desc": "basilWM", - "suffix": "cbf", + 'mean_cbf_wm_basil': { + 'desc': 'basilWM', + 'suffix': 'cbf', }, - "att_basil": { - "desc": "basil", - "suffix": "att", + 'att_basil': { + 'desc': 'basil', + 'suffix': 'att', }, } @@ -133,20 +132,20 @@ def prepare_timing_parameters(metadata: dict): timing_parameters = { key: metadata[key] for key in ( - "RepetitionTimePreparation", - "VolumeTiming", - "DelayTime", - "AcquisitionDuration", - "SliceTiming", + 'RepetitionTimePreparation', + 'VolumeTiming', + 'DelayTime', + 'AcquisitionDuration', + 'SliceTiming', ) if key in metadata } # Treat SliceTiming of [] or length 1 as equivalent to missing and remove it in any case - slice_timing = timing_parameters.pop("SliceTiming", []) + slice_timing = timing_parameters.pop('SliceTiming', []) - run_stc = len(slice_timing) > 1 and "slicetiming" not in config.workflow.ignore - timing_parameters["SliceTimingCorrected"] = run_stc + run_stc = len(slice_timing) > 1 and 'slicetiming' not in config.workflow.ignore + timing_parameters['SliceTimingCorrected'] = run_stc return timing_parameters @@ -156,7 +155,7 @@ def init_asl_fit_reports_wf( sdc_correction: bool, freesurfer: bool, # noqa:U100 output_dir: str, - name="asl_fit_reports_wf", + name='asl_fit_reports_wf', ) -> pe.Workflow: """Set up a battery of datasinks to store reports in the right location. 
@@ -206,33 +205,33 @@ def init_asl_fit_reports_wf( workflow = pe.Workflow(name=name) inputfields = [ - "source_file", - "sdc_aslref", - "coreg_aslref", - "aslref2anat_xfm", - "aslref2fmap_xfm", - "t1w_preproc", - "t1w_mask", - "t1w_dseg", - "fieldmap", - "fmap_ref", + 'source_file', + 'sdc_aslref', + 'coreg_aslref', + 'aslref2anat_xfm', + 'aslref2fmap_xfm', + 't1w_preproc', + 't1w_mask', + 't1w_dseg', + 'fieldmap', + 'fmap_ref', # May be missing - "subject_id", - "subjects_dir", + 'subject_id', + 'subjects_dir', # Report snippets - "summary_report", - "validation_report", + 'summary_report', + 'validation_report', ] - inputnode = pe.Node(niu.IdentityInterface(fields=inputfields), name="inputnode") + inputnode = pe.Node(niu.IdentityInterface(fields=inputfields), name='inputnode') ds_summary = pe.Node( DerivativesDataSink( base_directory=output_dir, - desc="summary", - datatype="figures", - dismiss_entities=("echo",), + desc='summary', + datatype='figures', + dismiss_entities=('echo',), ), - name="ds_report_summary", + name='ds_report_summary', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) @@ -240,11 +239,11 @@ def init_asl_fit_reports_wf( ds_validation = pe.Node( DerivativesDataSink( base_directory=output_dir, - desc="validation", - datatype="figures", - dismiss_entities=("echo",), + desc='validation', + datatype='figures', + dismiss_entities=('echo',), ), - name="ds_report_validation", + name='ds_report_validation', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) @@ -256,16 +255,16 @@ def init_asl_fit_reports_wf( default_value=0, float=True, invert_transform_flags=[True], - interpolation="LanczosWindowedSinc", - args="-v", + interpolation='LanczosWindowedSinc', + args='-v', ), - name="t1w_aslref", + name='t1w_aslref', mem_gb=1, ) t1w_wm = pe.Node( niu.Function(function=dseg_label), - name="t1w_wm", + name='t1w_wm', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) t1w_wm.inputs.label = 2 # BIDS default is WM=2 @@ -275,33 +274,33 @@ def init_asl_fit_reports_wf( dimension=3, default_value=0, invert_transform_flags=[True], - interpolation="NearestNeighbor", - args="-v", + interpolation='NearestNeighbor', + args='-v', ), - name="aslref_wm", + name='aslref_wm', mem_gb=1, ) workflow.connect([ (inputnode, ds_summary, [ - ("source_file", "source_file"), - ("summary_report", "in_file"), + ('source_file', 'source_file'), + ('summary_report', 'in_file'), ]), (inputnode, ds_validation, [ - ("source_file", "source_file"), - ("validation_report", "in_file"), + ('source_file', 'source_file'), + ('validation_report', 'in_file'), ]), (inputnode, t1w_aslref, [ - ("t1w_preproc", "input_image"), - ("coreg_aslref", "reference_image"), - ("aslref2anat_xfm", "transforms"), + ('t1w_preproc', 'input_image'), + ('coreg_aslref', 'reference_image'), + ('aslref2anat_xfm', 'transforms'), ]), - (inputnode, t1w_wm, [("t1w_dseg", "in_seg")]), + (inputnode, t1w_wm, [('t1w_dseg', 'in_seg')]), (inputnode, aslref_wm, [ - ("coreg_aslref", "reference_image"), - ("aslref2anat_xfm", "transforms"), + ('coreg_aslref', 'reference_image'), + ('aslref2anat_xfm', 'transforms'), ]), - (t1w_wm, aslref_wm, [("out", "input_image")]), + (t1w_wm, aslref_wm, [('out', 'input_image')]), ]) # fmt:skip # Reportlets follow the structure of init_asl_fit_wf stages @@ -323,77 +322,77 @@ def init_asl_fit_reports_wf( default_value=0, float=True, invert_transform_flags=[True], - interpolation="LanczosWindowedSinc", - args="-v", + interpolation='LanczosWindowedSinc', + args='-v', ), - name="fmapref_aslref", + 
name='fmapref_aslref', mem_gb=1, ) # SDC1 sdcreg_report = pe.Node( FieldmapReportlet( - reference_label="ASL reference", - moving_label="Fieldmap reference", - show="both", + reference_label='ASL reference', + moving_label='Fieldmap reference', + show='both', ), - name="sdecreg_report", + name='sdecreg_report', mem_gb=0.1, ) ds_sdcreg_report = pe.Node( DerivativesDataSink( base_directory=output_dir, - desc="fmapCoreg", - suffix="asl", - datatype="figures", - dismiss_entities=("echo",), + desc='fmapCoreg', + suffix='asl', + datatype='figures', + dismiss_entities=('echo',), ), - name="ds_sdcreg_report", + name='ds_sdcreg_report', ) # SDC2 sdc_report = pe.Node( SimpleBeforeAfter( - before_label="Distorted", - after_label="Corrected", + before_label='Distorted', + after_label='Corrected', dismiss_affine=True, ), - name="sdc_report", + name='sdc_report', mem_gb=0.1, ) ds_sdc_report = pe.Node( DerivativesDataSink( base_directory=output_dir, - desc="sdc", - suffix="asl", - datatype="figures", - dismiss_entities=("echo",), + desc='sdc', + suffix='asl', + datatype='figures', + dismiss_entities=('echo',), ), - name="ds_sdc_report", + name='ds_sdc_report', ) workflow.connect([ (inputnode, fmapref_aslref, [ - ("fmap_ref", "input_image"), - ("coreg_aslref", "reference_image"), - ("aslref2fmap_xfm", "transforms"), + ('fmap_ref', 'input_image'), + ('coreg_aslref', 'reference_image'), + ('aslref2fmap_xfm', 'transforms'), ]), (inputnode, sdcreg_report, [ - ("sdc_aslref", "reference"), - ("fieldmap", "fieldmap") + ('sdc_aslref', 'reference'), + ('fieldmap', 'fieldmap') ]), - (fmapref_aslref, sdcreg_report, [("output_image", "moving")]), - (inputnode, ds_sdcreg_report, [("source_file", "source_file")]), - (sdcreg_report, ds_sdcreg_report, [("out_report", "in_file")]), + (fmapref_aslref, sdcreg_report, [('output_image', 'moving')]), + (inputnode, ds_sdcreg_report, [('source_file', 'source_file')]), + (sdcreg_report, ds_sdcreg_report, [('out_report', 'in_file')]), (inputnode, sdc_report, [ - ("sdc_aslref", "before"), - ("coreg_aslref", "after"), + ('sdc_aslref', 'before'), + ('coreg_aslref', 'after'), ]), - (aslref_wm, sdc_report, [("output_image", "wm_seg")]), - (inputnode, ds_sdc_report, [("source_file", "source_file")]), - (sdc_report, ds_sdc_report, [("out_report", "in_file")]), + (aslref_wm, sdc_report, [('output_image', 'wm_seg')]), + (inputnode, ds_sdc_report, [('source_file', 'source_file')]), + (sdc_report, ds_sdc_report, [('out_report', 'in_file')]), ]) # fmt:skip # EPI-T1 registration @@ -401,31 +400,31 @@ def init_asl_fit_reports_wf( epi_t1_report = pe.Node( SimpleBeforeAfter( - before_label="T1w", - after_label="EPI", + before_label='T1w', + after_label='EPI', dismiss_affine=True, ), - name="epi_t1_report", + name='epi_t1_report', mem_gb=0.1, ) ds_epi_t1_report = pe.Node( DerivativesDataSink( base_directory=output_dir, - desc="coreg", - suffix="asl", - datatype="figures", - dismiss_entities=("echo",), + desc='coreg', + suffix='asl', + datatype='figures', + dismiss_entities=('echo',), ), - name="ds_epi_t1_report", + name='ds_epi_t1_report', ) workflow.connect([ - (inputnode, epi_t1_report, [("coreg_aslref", "after")]), - (t1w_aslref, epi_t1_report, [("output_image", "before")]), - (aslref_wm, epi_t1_report, [("output_image", "wm_seg")]), - (inputnode, ds_epi_t1_report, [("source_file", "source_file")]), - (epi_t1_report, ds_epi_t1_report, [("out_report", "in_file")]), + (inputnode, epi_t1_report, [('coreg_aslref', 'after')]), + (t1w_aslref, epi_t1_report, [('output_image', 'before')]), + 
(aslref_wm, epi_t1_report, [('output_image', 'wm_seg')]), + (inputnode, ds_epi_t1_report, [('source_file', 'source_file')]), + (epi_t1_report, ds_epi_t1_report, [('out_report', 'in_file')]), ]) # fmt:skip return workflow @@ -436,40 +435,40 @@ def init_ds_aslref_wf( bids_root, output_dir, desc: str, - name="ds_aslref_wf", + name='ds_aslref_wf', ) -> pe.Workflow: """Write out aslref image.""" workflow = pe.Workflow(name=name) inputnode = pe.Node( - niu.IdentityInterface(fields=["source_files", "aslref"]), - name="inputnode", + niu.IdentityInterface(fields=['source_files', 'aslref']), + name='inputnode', ) - outputnode = pe.Node(niu.IdentityInterface(fields=["aslref"]), name="outputnode") + outputnode = pe.Node(niu.IdentityInterface(fields=['aslref']), name='outputnode') - raw_sources = pe.Node(niu.Function(function=_bids_relative), name="raw_sources") + raw_sources = pe.Node(niu.Function(function=_bids_relative), name='raw_sources') raw_sources.inputs.bids_root = bids_root ds_aslref = pe.Node( DerivativesDataSink( base_directory=output_dir, desc=desc, - suffix="aslref", + suffix='aslref', compress=True, - dismiss_entities=("echo",), + dismiss_entities=('echo',), ), - name="ds_aslref", + name='ds_aslref', run_without_submitting=True, ) workflow.connect([ - (inputnode, raw_sources, [("source_files", "in_files")]), + (inputnode, raw_sources, [('source_files', 'in_files')]), (inputnode, ds_aslref, [ - ("aslref", "in_file"), - ("source_files", "source_file"), + ('aslref', 'in_file'), + ('source_files', 'source_file'), ]), - (raw_sources, ds_aslref, [("out", "RawSources")]), - (ds_aslref, outputnode, [("out_file", "aslref")]), + (raw_sources, ds_aslref, [('out', 'RawSources')]), + (ds_aslref, outputnode, [('out_file', 'aslref')]), ]) # fmt:skip return workflow @@ -480,73 +479,73 @@ def init_ds_asl_native_wf( bids_root: str, output_dir: str, asl_output: bool, - metadata: ty.List[dict], - cbf_3d: ty.List[str], - cbf_4d: ty.List[str], - att: ty.List[str], - name="ds_asl_native_wf", + metadata: list[dict], + cbf_3d: list[str], + cbf_4d: list[str], + att: list[str], + name='ds_asl_native_wf', ) -> pe.Workflow: """Write out aslref-space outputs.""" workflow = pe.Workflow(name=name) inputnode_fields = [ - "source_files", - "asl", - "asl_mask", + 'source_files', + 'asl', + 'asl_mask', ] inputnode_fields += cbf_3d inputnode_fields += cbf_4d inputnode_fields += att inputnode = pe.Node( niu.IdentityInterface(fields=inputnode_fields), - name="inputnode", + name='inputnode', ) - raw_sources = pe.Node(niu.Function(function=_bids_relative), name="raw_sources") + raw_sources = pe.Node(niu.Function(function=_bids_relative), name='raw_sources') raw_sources.inputs.bids_root = bids_root - workflow.connect([(inputnode, raw_sources, [("source_files", "in_files")])]) + workflow.connect([(inputnode, raw_sources, [('source_files', 'in_files')])]) # Masks should be output if any other derivatives are output ds_asl_mask = pe.Node( DerivativesDataSink( base_directory=output_dir, - desc="brain", - suffix="mask", + desc='brain', + suffix='mask', compress=True, - dismiss_entities=("echo",), + dismiss_entities=('echo',), ), - name="ds_asl_mask", + name='ds_asl_mask', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ (inputnode, ds_asl_mask, [ - ("source_files", "source_file"), - ("asl_mask", "in_file"), + ('source_files', 'source_file'), + ('asl_mask', 'in_file'), ]), - (raw_sources, ds_asl_mask, [("out", "RawSources")]), + (raw_sources, ds_asl_mask, [('out', 'RawSources')]), ]) # fmt:skip if 
asl_output: ds_asl = pe.Node( DerivativesDataSink( base_directory=output_dir, - desc="preproc", + desc='preproc', compress=True, SkullStripped=False, - dismiss_entities=("echo",), + dismiss_entities=('echo',), **metadata, ), - name="ds_asl", + name='ds_asl', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(inputnode, ds_asl, [("asl", "in_file")])]) + workflow.connect([(inputnode, ds_asl, [('asl', 'in_file')])]) datasinks = [ds_asl] for cbf_name in cbf_4d + cbf_3d: # TODO: Add EstimationReference and EstimationAlgorithm cbf_meta = { - "Units": "mL/100 g/min", + 'Units': 'mL/100 g/min', } fields = BASE_INPUT_FIELDS[cbf_name] @@ -554,21 +553,21 @@ def init_ds_asl_native_wf( DerivativesDataSink( base_directory=output_dir, compress=True, - dismiss_entities=("echo",), + dismiss_entities=('echo',), **fields, **cbf_meta, ), - name=f"ds_{cbf_name}", + name=f'ds_{cbf_name}', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) datasinks.append(ds_cbf) - workflow.connect([(inputnode, ds_cbf, [(cbf_name, "in_file")])]) + workflow.connect([(inputnode, ds_cbf, [(cbf_name, 'in_file')])]) for att_name in att: # TODO: Add EstimationReference and EstimationAlgorithm att_meta = { - "Units": "s", + 'Units': 's', } fields = BASE_INPUT_FIELDS[att_name] @@ -576,23 +575,23 @@ def init_ds_asl_native_wf( DerivativesDataSink( base_directory=output_dir, compress=True, - dismiss_entities=("echo",), + dismiss_entities=('echo',), **fields, **att_meta, ), - name=f"ds_{att_name}", + name=f'ds_{att_name}', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) datasinks.append(ds_att) - workflow.connect([(inputnode, ds_att, [(att_name, "in_file")])]) + workflow.connect([(inputnode, ds_att, [(att_name, 'in_file')])]) workflow.connect( [ - (inputnode, datasink, [("source_files", "source_file")]) for datasink in datasinks + (inputnode, datasink, [('source_files', 'source_file')]) for datasink in datasinks ] + [ - (raw_sources, datasink, [("out", "RawSources")]) for datasink in datasinks + (raw_sources, datasink, [('out', 'RawSources')]) for datasink in datasinks ] ) # fmt:skip @@ -603,69 +602,69 @@ def init_ds_volumes_wf( *, bids_root: str, output_dir: str, - metadata: ty.List[dict], - cbf_3d: ty.List[str], - cbf_4d: ty.List[str], - att: ty.List[str], - name: str = "ds_volumes_wf", + metadata: list[dict], + cbf_3d: list[str], + cbf_4d: list[str], + att: list[str], + name: str = 'ds_volumes_wf', ) -> pe.Workflow: """Apply transforms from reference to anatomical/standard space and write out derivatives.""" workflow = pe.Workflow(name=name) inputnode_fields = [ - "source_files", - "ref_file", - "asl", # Resampled into target space - "asl_mask", # aslref space - "aslref", # aslref space + 'source_files', + 'ref_file', + 'asl', # Resampled into target space + 'asl_mask', # aslref space + 'aslref', # aslref space # Anatomical - "aslref2anat_xfm", + 'aslref2anat_xfm', # Template - "anat2std_xfm", + 'anat2std_xfm', # Entities - "space", - "cohort", - "resolution", + 'space', + 'cohort', + 'resolution', ] inputnode_fields += cbf_3d inputnode_fields += cbf_4d inputnode_fields += att inputnode = pe.Node( niu.IdentityInterface(fields=inputnode_fields), - name="inputnode", + name='inputnode', ) - raw_sources = pe.Node(niu.Function(function=_bids_relative), name="raw_sources") + raw_sources = pe.Node(niu.Function(function=_bids_relative), name='raw_sources') raw_sources.inputs.bids_root = bids_root - aslref2target = pe.Node(niu.Merge(2), name="aslref2target") + aslref2target = pe.Node(niu.Merge(2), 
name='aslref2target') # BOLD is pre-resampled ds_asl = pe.Node( DerivativesDataSink( base_directory=output_dir, - desc="preproc", + desc='preproc', compress=True, SkullStripped=True, - dismiss_entities=("echo",), + dismiss_entities=('echo',), **metadata, ), - name="ds_asl", + name='ds_asl', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ - (inputnode, raw_sources, [("source_files", "in_files")]), + (inputnode, raw_sources, [('source_files', 'in_files')]), # Note that ANTs expects transforms in target-to-source order # Reverse this for nitransforms-based resamplers (inputnode, aslref2target, [ - ("anat2std_xfm", "in1"), - ("aslref2anat_xfm", "in2"), + ('anat2std_xfm', 'in1'), + ('aslref2anat_xfm', 'in2'), ]), (inputnode, ds_asl, [ - ("source_files", "source_file"), - ("asl", "in_file"), - ("space", "space"), - ("cohort", "cohort"), - ("resolution", "resolution"), + ('source_files', 'source_file'), + ('asl', 'in_file'), + ('space', 'space'), + ('cohort', 'cohort'), + ('resolution', 'resolution'), ]), ]) # fmt:skip @@ -674,44 +673,44 @@ def init_ds_volumes_wf( dimension=3, default_value=0, float=True, - interpolation="LanczosWindowedSinc", - args="-v", + interpolation='LanczosWindowedSinc', + args='-v', ), - name="resample_ref", + name='resample_ref', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) resample_mask = pe.Node( - ApplyTransforms(interpolation="GenericLabel", args="-v"), - name="resample_mask", + ApplyTransforms(interpolation='GenericLabel', args='-v'), + name='resample_mask', ) resamplers = [resample_ref, resample_mask] workflow.connect([ - (inputnode, resample_ref, [("aslref", "input_image")]), - (inputnode, resample_mask, [("asl_mask", "input_image")]), + (inputnode, resample_ref, [('aslref', 'input_image')]), + (inputnode, resample_mask, [('asl_mask', 'input_image')]), ]) # fmt:skip ds_ref = pe.Node( DerivativesDataSink( base_directory=output_dir, - suffix="aslref", + suffix='aslref', compress=True, - dismiss_entities=("echo",), + dismiss_entities=('echo',), ), - name="ds_ref", + name='ds_ref', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) ds_mask = pe.Node( DerivativesDataSink( base_directory=output_dir, - desc="brain", - suffix="mask", + desc='brain', + suffix='mask', compress=True, - dismiss_entities=("echo",), + dismiss_entities=('echo',), ), - name="ds_mask", + name='ds_mask', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) @@ -720,23 +719,23 @@ def init_ds_volumes_wf( for cbf_name in cbf_4d + cbf_3d: # TODO: Add EstimationReference and EstimationAlgorithm cbf_meta = { - "Units": "mL/100 g/min", + 'Units': 'mL/100 g/min', } fields = BASE_INPUT_FIELDS[cbf_name] kwargs = {} if cbf_name in cbf_4d: - kwargs["dimension"] = 3 + kwargs['dimension'] = 3 resample_cbf = pe.Node( ApplyTransforms( - interpolation="LanczosWindowedSinc", + interpolation='LanczosWindowedSinc', float=True, input_image_type=3, - args="-v", + args='-v', **kwargs, ), - name=f"warp_{cbf_name}_to_std", + name=f'warp_{cbf_name}_to_std', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) @@ -744,34 +743,34 @@ def init_ds_volumes_wf( DerivativesDataSink( base_directory=output_dir, compress=True, - dismiss_entities=("echo",), + dismiss_entities=('echo',), **fields, **cbf_meta, ), - name=f"ds_{cbf_name}", + name=f'ds_{cbf_name}', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) resamplers.append(resample_cbf) datasinks.append(ds_cbf) - workflow.connect([(inputnode, resample_cbf, [(cbf_name, "input_image")])]) 
+ workflow.connect([(inputnode, resample_cbf, [(cbf_name, 'input_image')])]) for att_name in att: # TODO: Add EstimationReference and EstimationAlgorithm att_meta = { - "Units": "s", + 'Units': 's', } fields = BASE_INPUT_FIELDS[att_name] resample_att = pe.Node( ApplyTransforms( - interpolation="LanczosWindowedSinc", + interpolation='LanczosWindowedSinc', float=True, input_image_type=3, - args="-v", + args='-v', ), - name=f"warp_{att_name}_to_std", + name=f'warp_{att_name}_to_std', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) @@ -780,37 +779,37 @@ def init_ds_volumes_wf( DerivativesDataSink( base_directory=output_dir, compress=True, - dismiss_entities=("echo",), + dismiss_entities=('echo',), **fields, **att_meta, ), - name=f"ds_{att_name}", + name=f'ds_{att_name}', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) resamplers.append(resample_att) datasinks.append(ds_att) - workflow.connect([(inputnode, resample_att, [(att_name, "input_image")])]) + workflow.connect([(inputnode, resample_att, [(att_name, 'input_image')])]) workflow.connect( [ - (inputnode, resampler, [("ref_file", "reference_image")]) + (inputnode, resampler, [('ref_file', 'reference_image')]) for resampler in resamplers ] + [ - (aslref2target, resampler, [("out", "transforms")]) + (aslref2target, resampler, [('out', 'transforms')]) for resampler in resamplers ] + [ (inputnode, datasink, [ - ("source_files", "source_file"), - ("space", "space"), - ("cohort", "cohort"), - ("resolution", "resolution"), + ('source_files', 'source_file'), + ('space', 'space'), + ('cohort', 'cohort'), + ('resolution', 'resolution'), ]) for datasink in datasinks ] + [ - (resampler, datasink, [("output_image", "in_file")]) - for resampler, datasink in zip(resamplers, datasinks) + (resampler, datasink, [('output_image', 'in_file')]) + for resampler, datasink in zip(resamplers, datasinks, strict=False) ] ) # fmt:skip @@ -821,12 +820,12 @@ def init_ds_ciftis_wf( *, bids_root: str, output_dir: str, - metadata: ty.List[dict], - cbf_3d: ty.List[str], - cbf_4d: ty.List[str], - att: ty.List[str], + metadata: list[dict], + cbf_3d: list[str], + cbf_4d: list[str], + att: list[str], omp_nthreads: int, - name: str = "ds_ciftis_wf", + name: str = 'ds_ciftis_wf', ) -> pe.Workflow: """Apply transforms from reference to fsLR space and write out derivatives.""" from fmriprep.workflows.bold.resampling import init_bold_grayords_wf @@ -835,30 +834,30 @@ def init_ds_ciftis_wf( workflow = pe.Workflow(name=name) inputnode_fields = [ - "asl_cifti", - "source_files", + 'asl_cifti', + 'source_files', # Anatomical - "anat", - "aslref2anat_xfm", + 'anat', + 'aslref2anat_xfm', # Template - "anat2mni6_xfm", - "mni6_mask", + 'anat2mni6_xfm', + 'mni6_mask', # Pre-computed goodvoxels mask. May be Undefined. 
- "goodvoxels_mask", + 'goodvoxels_mask', # Other inputs - "white", - "pial", - "midthickness", - "midthickness_fsLR", - "sphere_reg_fsLR", - "cortex_mask", + 'white', + 'pial', + 'midthickness', + 'midthickness_fsLR', + 'sphere_reg_fsLR', + 'cortex_mask', ] inputnode_fields += cbf_3d inputnode_fields += cbf_4d inputnode_fields += att inputnode = pe.Node( niu.IdentityInterface(fields=inputnode_fields), - name="inputnode", + name='inputnode', ) outputnode_fields = [] @@ -867,144 +866,144 @@ def init_ds_ciftis_wf( outputnode_fields += att outputnode = pe.Node( niu.IdentityInterface(fields=outputnode_fields), - name="outputnode", + name='outputnode', ) - raw_sources = pe.Node(niu.Function(function=_bids_relative), name="raw_sources") + raw_sources = pe.Node(niu.Function(function=_bids_relative), name='raw_sources') raw_sources.inputs.bids_root = bids_root - workflow.connect([(inputnode, raw_sources, [("source_files", "in_files")])]) + workflow.connect([(inputnode, raw_sources, [('source_files', 'in_files')])]) ds_asl_cifti = pe.Node( DerivativesDataSink( base_directory=output_dir, - space="fsLR", + space='fsLR', density=config.workflow.cifti_output, - suffix="asl", - extension="dtseries.nii", + suffix='asl', + extension='dtseries.nii', compress=False, ), - name="ds_asl_cifti", + name='ds_asl_cifti', run_without_submitting=True, ) workflow.connect([ (inputnode, ds_asl_cifti, [ - ("asl_cifti", "in_file"), - ("source_files", "source_file"), + ('asl_cifti', 'in_file'), + ('source_files', 'source_file'), ]), ]) # fmt:skip - aslref2MNI6 = pe.Node(niu.Merge(2), name="aslref2MNI6") + aslref2MNI6 = pe.Node(niu.Merge(2), name='aslref2MNI6') workflow.connect([ (inputnode, aslref2MNI6, [ - ("aslref2anat_xfm", "in1"), - ("anat2mni6_xfm", "in2"), + ('aslref2anat_xfm', 'in1'), + ('anat2mni6_xfm', 'in2'), ]), ]) # fmt:skip for cbf_deriv in cbf_4d + cbf_3d + att: kwargs = {} - extension = "dscalar.nii" + extension = 'dscalar.nii' if cbf_deriv in cbf_4d: - kwargs["dimension"] = 3 - extension = "dtseries.nii" + kwargs['dimension'] = 3 + extension = 'dtseries.nii' if cbf_deriv in att: - meta = {"Units": "s"} + meta = {'Units': 's'} else: - meta = {"Units": "mL/100 g/min"} + meta = {'Units': 'mL/100 g/min'} warp_cbf_to_anat = pe.Node( ApplyTransforms( - interpolation="LanczosWindowedSinc", + interpolation='LanczosWindowedSinc', float=True, input_image_type=3, - args="-v", + args='-v', **kwargs, ), - name=f"warp_{cbf_deriv}_to_anat", + name=f'warp_{cbf_deriv}_to_anat', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ (inputnode, warp_cbf_to_anat, [ - (cbf_deriv, "input_image"), - ("anat", "reference_image"), - ("aslref2anat_xfm", "transforms"), + (cbf_deriv, 'input_image'), + ('anat', 'reference_image'), + ('aslref2anat_xfm', 'transforms'), ]), ]) # fmt:skip warp_cbf_to_MNI6 = pe.Node( ApplyTransforms( - interpolation="LanczosWindowedSinc", + interpolation='LanczosWindowedSinc', float=True, input_image_type=3, - args="-v", + args='-v', **kwargs, ), - name=f"warp_{cbf_deriv}_to_MNI6", + name=f'warp_{cbf_deriv}_to_MNI6', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ (inputnode, warp_cbf_to_MNI6, [ - ("mni6_mask", "reference_image"), - (cbf_deriv, "input_image"), + ('mni6_mask', 'reference_image'), + (cbf_deriv, 'input_image'), ]), - (aslref2MNI6, warp_cbf_to_MNI6, [("out", "transforms")]), + (aslref2MNI6, warp_cbf_to_MNI6, [('out', 'transforms')]), ]) # fmt:skip cbf_fsLR_resampling_wf = init_bold_fsLR_resampling_wf( grayord_density=config.workflow.cifti_output, omp_nthreads=omp_nthreads, 
mem_gb=config.DEFAULT_MEMORY_MIN_GB, - name=f"{cbf_deriv}_fsLR_resampling_wf", + name=f'{cbf_deriv}_fsLR_resampling_wf', ) workflow.connect([ # Resample T1w-space CBF to fsLR surfaces (inputnode, cbf_fsLR_resampling_wf, [ - ("white", "inputnode.white"), - ("pial", "inputnode.pial"), - ("midthickness", "inputnode.midthickness"), - ("midthickness_fsLR", "inputnode.midthickness_fsLR"), - ("sphere_reg_fsLR", "inputnode.sphere_reg_fsLR"), - ("cortex_mask", "inputnode.cortex_mask"), - ("goodvoxels_mask", "inputnode.volume_roi"), + ('white', 'inputnode.white'), + ('pial', 'inputnode.pial'), + ('midthickness', 'inputnode.midthickness'), + ('midthickness_fsLR', 'inputnode.midthickness_fsLR'), + ('sphere_reg_fsLR', 'inputnode.sphere_reg_fsLR'), + ('cortex_mask', 'inputnode.cortex_mask'), + ('goodvoxels_mask', 'inputnode.volume_roi'), ]), - (warp_cbf_to_anat, cbf_fsLR_resampling_wf, [("output_image", "inputnode.bold_file")]) + (warp_cbf_to_anat, cbf_fsLR_resampling_wf, [('output_image', 'inputnode.bold_file')]) ]) # fmt:skip cbf_grayords_wf = init_bold_grayords_wf( grayord_density=config.workflow.cifti_output, mem_gb=config.DEFAULT_MEMORY_MIN_GB, - repetition_time=metadata["RepetitionTime"], - name=f"{cbf_deriv}_grayords_wf", + repetition_time=metadata['RepetitionTime'], + name=f'{cbf_deriv}_grayords_wf', ) workflow.connect([ - (warp_cbf_to_MNI6, cbf_grayords_wf, [("output_image", "inputnode.bold_std")]), + (warp_cbf_to_MNI6, cbf_grayords_wf, [('output_image', 'inputnode.bold_std')]), (cbf_fsLR_resampling_wf, cbf_grayords_wf, [ - ("outputnode.bold_fsLR", "inputnode.bold_fsLR"), + ('outputnode.bold_fsLR', 'inputnode.bold_fsLR'), ]), ]) # fmt:skip ds_cbf_cifti = pe.Node( DerivativesDataSink( base_directory=output_dir, - space="fsLR", + space='fsLR', density=config.workflow.cifti_output, extension=extension, compress=False, **BASE_INPUT_FIELDS[cbf_deriv], **meta, ), - name=f"ds_{cbf_deriv}_cifti", + name=f'ds_{cbf_deriv}_cifti', run_without_submitting=True, ) workflow.connect([ - (inputnode, ds_cbf_cifti, [("source_files", "source_file")]), - (raw_sources, ds_cbf_cifti, [("out", "RawSources")]), + (inputnode, ds_cbf_cifti, [('source_files', 'source_file')]), + (raw_sources, ds_cbf_cifti, [('out', 'RawSources')]), (cbf_grayords_wf, ds_cbf_cifti, [ - ("outputnode.cifti_bold", "in_file"), - (("outputnode.cifti_metadata", _read_json), "meta_dict"), + ('outputnode.cifti_bold', 'in_file'), + (('outputnode.cifti_metadata', _read_json), 'meta_dict'), ]), - (ds_cbf_cifti, outputnode, [("out_file", cbf_deriv)]) + (ds_cbf_cifti, outputnode, [('out_file', cbf_deriv)]) ]) # fmt:skip return workflow @@ -1015,6 +1014,6 @@ def _read_json(in_file): from pathlib import Path if not isinstance(in_file, str): - raise ValueError(f"_read_json: input is not str ({in_file})") + raise ValueError(f'_read_json: input is not str ({in_file})') return loads(Path(in_file).read_text()) diff --git a/aslprep/workflows/asl/plotting.py b/aslprep/workflows/asl/plotting.py index c622edf59..b1360c4ec 100644 --- a/aslprep/workflows/asl/plotting.py +++ b/aslprep/workflows/asl/plotting.py @@ -20,7 +20,7 @@ def init_cbf_reporting_wf( plot_timeseries=True, scorescrub=False, basil=False, - name="cbf_reporting_wf", + name='cbf_reporting_wf', ): """Generate CBF reports. 
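# A minimal sketch of the transform-ordering pattern used in init_ds_volumes_wf
# above: per its own comment, ANTs expects the transform stack in
# target-to-source order, so the anat->std transform is merged into slot 1 and
# the aslref->anat transform into slot 2 before being handed to ApplyTransforms.
# The node and field names below are hypothetical stand-ins, and ApplyTransforms
# is taken from nipype's ANTs wrappers, which may differ from the import ASLPrep
# actually uses; this only illustrates the wiring, it is not part of the patch.
from nipype.interfaces import utility as niu
from nipype.interfaces.ants import ApplyTransforms
from nipype.pipeline import engine as pe

wf = pe.Workflow(name='xfm_order_sketch')
inputnode = pe.Node(
    niu.IdentityInterface(
        fields=['aslref', 'std_ref', 'aslref2anat_xfm', 'anat2std_xfm'],
    ),
    name='inputnode',
)
# Stack the transforms target-first: in1 = anat->std, in2 = aslref->anat.
aslref2target = pe.Node(niu.Merge(2), name='aslref2target')
resample = pe.Node(
    ApplyTransforms(interpolation='LanczosWindowedSinc', float=True, args='-v'),
    name='resample',
)
wf.connect([
    (inputnode, aslref2target, [
        ('anat2std_xfm', 'in1'),
        ('aslref2anat_xfm', 'in2'),
    ]),
    (inputnode, resample, [
        ('aslref', 'input_image'),
        ('std_ref', 'reference_image'),
    ]),
    (aslref2target, resample, [('out', 'transforms')]),
])  # builds the graph only; running it would require ANTs and real inputs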
@@ -45,49 +45,49 @@ def init_cbf_reporting_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "aslref", - "asl_mask", - "t1w_dseg", - "aslref2anat_xfm", - "std2anat_xfm", - "confounds_file", - "qc_file", + 'aslref', + 'asl_mask', + 't1w_dseg', + 'aslref2anat_xfm', + 'std2anat_xfm', + 'confounds_file', + 'qc_file', # If plot_timeseries is True - "crown_mask", - "acompcor_masks", + 'crown_mask', + 'acompcor_masks', # CBF outputs - "mean_cbf", + 'mean_cbf', # Single-delay outputs - "cbf_ts", # only for non-GE + 'cbf_ts', # only for non-GE # If CIFTI is enabled - "cifti_cbf_ts", + 'cifti_cbf_ts', # Multi-delay outputs - "att", + 'att', # SCORE/SCRUB outputs - "cbf_ts_score", # unused - "mean_cbf_score", - "mean_cbf_scrub", - "score_outlier_index", + 'cbf_ts_score', # unused + 'mean_cbf_score', + 'mean_cbf_scrub', + 'score_outlier_index', # BASIL outputs - "mean_cbf_basil", - "mean_cbf_gm_basil", - "mean_cbf_wm_basil", # unused - "att_basil", # unused + 'mean_cbf_basil', + 'mean_cbf_gm_basil', + 'mean_cbf_wm_basil', # unused + 'att_basil', # unused ], ), - name="inputnode", + name='inputnode', ) summary = pe.Node( CBFSummary(), - name="summary", + name='summary', mem_gb=config.DEFAULT_MEMORY_MIN_GB, run_without_submitting=True, ) workflow.connect([ (inputnode, summary, [ - ("confounds_file", "confounds_file"), - ("qc_file", "qc_file"), + ('confounds_file', 'confounds_file'), + ('qc_file', 'qc_file'), ]) ]) # fmt:skip @@ -97,291 +97,291 @@ def init_cbf_reporting_wf( float=True, dimension=3, default_value=0, - interpolation="GenericLabel", + interpolation='GenericLabel', invert_transform_flags=[True], - args="-v", + args='-v', ), - name="warp_t1w_dseg_to_aslref", + name='warp_t1w_dseg_to_aslref', ) workflow.connect([ (inputnode, warp_t1w_dseg_to_aslref, [ - ("asl_mask", "reference_image"), - ("t1w_dseg", "input_image"), - ("aslref2anat_xfm", "transforms"), + ('asl_mask', 'reference_image'), + ('t1w_dseg', 'input_image'), + ('aslref2anat_xfm', 'transforms'), ]), ]) # fmt:skip if plot_timeseries: # Global and segment regressors signals_class_labels = [ - "global_signal", - "csf", - "white_matter", - "csf_wm", + 'global_signal', + 'csf', + 'white_matter', + 'csf_wm', ] merge_rois = pe.Node( niu.Merge(2, ravel_inputs=True), - name="merge_rois", + name='merge_rois', run_without_submitting=True, ) signals = pe.Node( SignalExtraction(class_labels=signals_class_labels), - name="signals", + name='signals', mem_gb=2, ) workflow.connect([ (inputnode, merge_rois, [ - ("asl_mask", "in1"), - ("acompcor_masks", "in2"), + ('asl_mask', 'in1'), + ('acompcor_masks', 'in2'), ]), - (inputnode, signals, [("cbf_ts", "in_file")]), - (merge_rois, signals, [("out", "label_files")]), + (inputnode, signals, [('cbf_ts', 'in_file')]), + (merge_rois, signals, [('out', 'label_files')]), ]) # fmt:skip # Time series are only available for non-GE data. 
# Create confounds file with SCORE index cbf_confounds = pe.Node( GatherCBFConfounds(), - name="cbf_confounds", + name='cbf_confounds', ) workflow.connect([ - (inputnode, cbf_confounds, [("score_outlier_index", "score")]), - (signals, cbf_confounds, [("out_file", "signals")]), + (inputnode, cbf_confounds, [('score_outlier_index', 'score')]), + (signals, cbf_confounds, [('out_file', 'signals')]), ]) # fmt:skip carpetplot_wf = init_carpetplot_wf( mem_gb=2, confounds_list=[ - ("global_signal", None, "GS"), - ("csf", None, "GSCSF"), - ("white_matter", None, "GSWM"), + ('global_signal', None, 'GS'), + ('csf', None, 'GSCSF'), + ('white_matter', None, 'GSWM'), ] - + ([("score_outlier_index", None, "SCORE Index")] if scorescrub else []), + + ([('score_outlier_index', None, 'SCORE Index')] if scorescrub else []), metadata=metadata, cifti_output=False, - suffix="cbf", - name="cbf_carpetplot_wf", + suffix='cbf', + name='cbf_carpetplot_wf', ) carpetplot_wf.inputs.inputnode.dummy_scans = 0 workflow.connect([ (inputnode, carpetplot_wf, [ - ("std2anat_xfm", "inputnode.std2anat_xfm"), - ("cbf_ts", "inputnode.asl"), - ("asl_mask", "inputnode.asl_mask"), - ("aslref2anat_xfm", "inputnode.aslref2anat_xfm"), - ("crown_mask", "inputnode.crown_mask"), - (("acompcor_masks", _select_last_in_list), "inputnode.acompcor_mask"), - ("cifti_cbf_ts", "inputnode.cifti_asl"), + ('std2anat_xfm', 'inputnode.std2anat_xfm'), + ('cbf_ts', 'inputnode.asl'), + ('asl_mask', 'inputnode.asl_mask'), + ('aslref2anat_xfm', 'inputnode.aslref2anat_xfm'), + ('crown_mask', 'inputnode.crown_mask'), + (('acompcor_masks', _select_last_in_list), 'inputnode.acompcor_mask'), + ('cifti_cbf_ts', 'inputnode.cifti_asl'), ]), - (cbf_confounds, carpetplot_wf, [("confounds_file", "inputnode.confounds_file")]), + (cbf_confounds, carpetplot_wf, [('confounds_file', 'inputnode.confounds_file')]), ]) # fmt:skip - cbf_summary = pe.Node(CBFSummaryPlot(label="cbf", vmax=100), name="cbf_summary", mem_gb=1) + cbf_summary = pe.Node(CBFSummaryPlot(label='cbf', vmax=100), name='cbf_summary', mem_gb=1) workflow.connect([ (inputnode, cbf_summary, [ - ("mean_cbf", "cbf"), - ("aslref", "ref_vol"), + ('mean_cbf', 'cbf'), + ('aslref', 'ref_vol'), ]), ]) # fmt:skip ds_report_cbf = pe.Node( - DerivativesDataSink(datatype="figures", desc="cbf", suffix="cbf", keep_dtype=True), - name="ds_report_cbf", + DerivativesDataSink(datatype='figures', desc='cbf', suffix='cbf', keep_dtype=True), + name='ds_report_cbf', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(cbf_summary, ds_report_cbf, [("out_file", "in_file")])]) + workflow.connect([(cbf_summary, ds_report_cbf, [('out_file', 'in_file')])]) cbf_by_tt_plot = pe.Node( CBFByTissueTypePlot(), - name="cbf_by_tt_plot", + name='cbf_by_tt_plot', ) workflow.connect([ - (inputnode, cbf_by_tt_plot, [("mean_cbf", "cbf")]), - (warp_t1w_dseg_to_aslref, cbf_by_tt_plot, [("output_image", "seg_file")]), + (inputnode, cbf_by_tt_plot, [('mean_cbf', 'cbf')]), + (warp_t1w_dseg_to_aslref, cbf_by_tt_plot, [('output_image', 'seg_file')]), ]) # fmt:skip ds_report_cbf_by_tt = pe.Node( DerivativesDataSink( - datatype="figures", - desc="cbfByTissueType", - suffix="cbf", + datatype='figures', + desc='cbfByTissueType', + suffix='cbf', keep_dtype=True, ), - name="ds_report_cbf_by_tt", + name='ds_report_cbf_by_tt', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(cbf_by_tt_plot, ds_report_cbf_by_tt, [("out_file", "in_file")])]) + workflow.connect([(cbf_by_tt_plot, 
ds_report_cbf_by_tt, [('out_file', 'in_file')])]) if scorescrub: score_summary = pe.Node( - CBFSummaryPlot(label="score", vmax=100), - name="score_summary", + CBFSummaryPlot(label='score', vmax=100), + name='score_summary', mem_gb=1, ) workflow.connect([ (inputnode, score_summary, [ - ("mean_cbf_score", "cbf"), - ("aslref", "ref_vol"), + ('mean_cbf_score', 'cbf'), + ('aslref', 'ref_vol'), ]), ]) # fmt:skip ds_report_score = pe.Node( - DerivativesDataSink(datatype="figures", desc="score", suffix="cbf", keep_dtype=True), - name="ds_report_score", + DerivativesDataSink(datatype='figures', desc='score', suffix='cbf', keep_dtype=True), + name='ds_report_score', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(score_summary, ds_report_score, [("out_file", "in_file")])]) + workflow.connect([(score_summary, ds_report_score, [('out_file', 'in_file')])]) score_by_tt_plot = pe.Node( CBFByTissueTypePlot(), - name="score_by_tt_plot", + name='score_by_tt_plot', ) workflow.connect([ - (inputnode, score_by_tt_plot, [("mean_cbf_score", "cbf")]), - (warp_t1w_dseg_to_aslref, score_by_tt_plot, [("output_image", "seg_file")]), + (inputnode, score_by_tt_plot, [('mean_cbf_score', 'cbf')]), + (warp_t1w_dseg_to_aslref, score_by_tt_plot, [('output_image', 'seg_file')]), ]) # fmt:skip ds_report_score_by_tt = pe.Node( DerivativesDataSink( - datatype="figures", - desc="scoreByTissueType", - suffix="cbf", + datatype='figures', + desc='scoreByTissueType', + suffix='cbf', keep_dtype=True, ), - name="ds_report_score_by_tt", + name='ds_report_score_by_tt', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(score_by_tt_plot, ds_report_score_by_tt, [("out_file", "in_file")])]) + workflow.connect([(score_by_tt_plot, ds_report_score_by_tt, [('out_file', 'in_file')])]) scrub_summary = pe.Node( - CBFSummaryPlot(label="scrub", vmax=100), - name="scrub_summary", + CBFSummaryPlot(label='scrub', vmax=100), + name='scrub_summary', mem_gb=1, ) workflow.connect([ (inputnode, scrub_summary, [ - ("mean_cbf_scrub", "cbf"), - ("aslref", "ref_vol"), + ('mean_cbf_scrub', 'cbf'), + ('aslref', 'ref_vol'), ]), ]) # fmt:skip ds_report_scrub = pe.Node( - DerivativesDataSink(datatype="figures", desc="scrub", suffix="cbf", keep_dtype=True), - name="ds_report_scrub", + DerivativesDataSink(datatype='figures', desc='scrub', suffix='cbf', keep_dtype=True), + name='ds_report_scrub', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(scrub_summary, ds_report_scrub, [("out_file", "in_file")])]) + workflow.connect([(scrub_summary, ds_report_scrub, [('out_file', 'in_file')])]) scrub_by_tt_plot = pe.Node( CBFByTissueTypePlot(), - name="scrub_by_tt_plot", + name='scrub_by_tt_plot', ) workflow.connect([ - (inputnode, scrub_by_tt_plot, [("mean_cbf_scrub", "cbf")]), - (warp_t1w_dseg_to_aslref, scrub_by_tt_plot, [("output_image", "seg_file")]), + (inputnode, scrub_by_tt_plot, [('mean_cbf_scrub', 'cbf')]), + (warp_t1w_dseg_to_aslref, scrub_by_tt_plot, [('output_image', 'seg_file')]), ]) # fmt:skip ds_report_scrub_by_tt = pe.Node( DerivativesDataSink( - datatype="figures", - desc="scrubByTissueType", - suffix="cbf", + datatype='figures', + desc='scrubByTissueType', + suffix='cbf', keep_dtype=True, ), - name="ds_report_scrub_by_tt", + name='ds_report_scrub_by_tt', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(scrub_by_tt_plot, ds_report_scrub_by_tt, [("out_file", "in_file")])]) + 
workflow.connect([(scrub_by_tt_plot, ds_report_scrub_by_tt, [('out_file', 'in_file')])]) if basil: basil_summary = pe.Node( - CBFSummaryPlot(label="basil", vmax=100), - name="basil_summary", + CBFSummaryPlot(label='basil', vmax=100), + name='basil_summary', mem_gb=1, ) workflow.connect([ (inputnode, basil_summary, [ - ("mean_cbf_basil", "cbf"), - ("aslref", "ref_vol"), + ('mean_cbf_basil', 'cbf'), + ('aslref', 'ref_vol'), ]), ]) # fmt:skip ds_report_basil = pe.Node( - DerivativesDataSink(datatype="figures", desc="basil", suffix="cbf", keep_dtype=True), - name="ds_report_basil", + DerivativesDataSink(datatype='figures', desc='basil', suffix='cbf', keep_dtype=True), + name='ds_report_basil', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(basil_summary, ds_report_basil, [("out_file", "in_file")])]) + workflow.connect([(basil_summary, ds_report_basil, [('out_file', 'in_file')])]) basil_by_tt_plot = pe.Node( CBFByTissueTypePlot(), - name="basil_by_tt_plot", + name='basil_by_tt_plot', ) workflow.connect([ - (inputnode, basil_by_tt_plot, [("mean_cbf_basil", "cbf")]), - (warp_t1w_dseg_to_aslref, basil_by_tt_plot, [("output_image", "seg_file")]), + (inputnode, basil_by_tt_plot, [('mean_cbf_basil', 'cbf')]), + (warp_t1w_dseg_to_aslref, basil_by_tt_plot, [('output_image', 'seg_file')]), ]) # fmt:skip ds_report_basil_by_tt = pe.Node( DerivativesDataSink( - datatype="figures", - desc="basilByTissueType", - suffix="cbf", + datatype='figures', + desc='basilByTissueType', + suffix='cbf', keep_dtype=True, ), - name="ds_report_basil_by_tt", + name='ds_report_basil_by_tt', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(basil_by_tt_plot, ds_report_basil_by_tt, [("out_file", "in_file")])]) + workflow.connect([(basil_by_tt_plot, ds_report_basil_by_tt, [('out_file', 'in_file')])]) pvc_summary = pe.Node( - CBFSummaryPlot(label="pvc", vmax=120), - name="pvc_summary", + CBFSummaryPlot(label='pvc', vmax=120), + name='pvc_summary', mem_gb=1, ) workflow.connect([ (inputnode, pvc_summary, [ - ("mean_cbf_gm_basil", "cbf"), - ("aslref", "ref_vol"), + ('mean_cbf_gm_basil', 'cbf'), + ('aslref', 'ref_vol'), ]), ]) # fmt:skip ds_report_pvc = pe.Node( - DerivativesDataSink(datatype="figures", desc="basilGM", suffix="cbf", keep_dtype=True), - name="ds_report_pvc", + DerivativesDataSink(datatype='figures', desc='basilGM', suffix='cbf', keep_dtype=True), + name='ds_report_pvc', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(pvc_summary, ds_report_pvc, [("out_file", "in_file")])]) + workflow.connect([(pvc_summary, ds_report_pvc, [('out_file', 'in_file')])]) pvc_by_tt_plot = pe.Node( CBFByTissueTypePlot(), - name="pvc_by_tt_plot", + name='pvc_by_tt_plot', ) workflow.connect([ - (inputnode, pvc_by_tt_plot, [("mean_cbf_gm_basil", "cbf")]), - (warp_t1w_dseg_to_aslref, pvc_by_tt_plot, [("output_image", "seg_file")]), + (inputnode, pvc_by_tt_plot, [('mean_cbf_gm_basil', 'cbf')]), + (warp_t1w_dseg_to_aslref, pvc_by_tt_plot, [('output_image', 'seg_file')]), ]) # fmt:skip ds_report_pvc_by_tt = pe.Node( DerivativesDataSink( - datatype="figures", - desc="basilGMByTissueType", - suffix="cbf", + datatype='figures', + desc='basilGMByTissueType', + suffix='cbf', keep_dtype=True, ), - name="ds_report_pvc_by_tt", + name='ds_report_pvc_by_tt', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(pvc_by_tt_plot, ds_report_pvc_by_tt, [("out_file", "in_file")])]) + 
workflow.connect([(pvc_by_tt_plot, ds_report_pvc_by_tt, [('out_file', 'in_file')])]) return workflow diff --git a/aslprep/workflows/asl/reference.py b/aslprep/workflows/asl/reference.py index 52f9a41a4..b76281d82 100644 --- a/aslprep/workflows/asl/reference.py +++ b/aslprep/workflows/asl/reference.py @@ -36,7 +36,7 @@ def init_raw_aslref_wf( asl_file=None, m0scan=False, use_ge=False, - name="raw_aslref_wf", + name='raw_aslref_wf', ): """Build a workflow that generates reference BOLD images for a series. @@ -98,23 +98,23 @@ def init_raw_aslref_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "asl_file", - "aslcontext", - "m0scan", - "dummy_scans", + 'asl_file', + 'aslcontext', + 'm0scan', + 'dummy_scans', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "asl_file", - "aslref", - "validation_report", + 'asl_file', + 'aslref', + 'validation_report', ], ), - name="outputnode", + name='outputnode', ) # Simplify manually setting input image @@ -123,57 +123,57 @@ def init_raw_aslref_wf( val_asl = pe.Node( ValidateImage(), - name="val_asl", + name='val_asl', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ - (inputnode, val_asl, [("asl_file", "in_file")]), + (inputnode, val_asl, [('asl_file', 'in_file')]), (val_asl, outputnode, [ - ("out_file", "asl_file"), - ("out_report", "validation_report"), + ('out_file', 'asl_file'), + ('out_report', 'validation_report'), ]), ]) # fmt:skip if m0scan: val_m0scan = pe.Node( ValidateImage(), - name="val_m0scan", + name='val_m0scan', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(inputnode, val_m0scan, [("m0scan", "in_file")])]) + workflow.connect([(inputnode, val_m0scan, [('m0scan', 'in_file')])]) select_highest_contrast_volumes = pe.Node( SelectHighestContrastVolumes(prioritize_m0=use_ge), - name="select_highest_contrast_volumes", + name='select_highest_contrast_volumes', mem_gb=1, ) workflow.connect([ - (inputnode, select_highest_contrast_volumes, [("aslcontext", "aslcontext")]), - (val_asl, select_highest_contrast_volumes, [("out_file", "asl_file")]), + (inputnode, select_highest_contrast_volumes, [('aslcontext', 'aslcontext')]), + (val_asl, select_highest_contrast_volumes, [('out_file', 'asl_file')]), ]) # fmt:skip if m0scan: - workflow.connect([(val_m0scan, select_highest_contrast_volumes, [("out_file", "m0scan")])]) + workflow.connect([(val_m0scan, select_highest_contrast_volumes, [('out_file', 'm0scan')])]) - gen_avg = pe.Node(RobustAverage(), name="gen_avg", mem_gb=1) + gen_avg = pe.Node(RobustAverage(), name='gen_avg', mem_gb=1) workflow.connect([ - (select_highest_contrast_volumes, gen_avg, [("selected_volumes_file", "in_file")]), + (select_highest_contrast_volumes, gen_avg, [('selected_volumes_file', 'in_file')]), ]) # fmt:skip if use_ge and (config.workflow.smooth_kernel > 0): workflow.__desc__ += ( - "The reference image was then smoothed with a Gaussian kernel " - f"(FWHM = {config.workflow.smooth_kernel} mm)." + 'The reference image was then smoothed with a Gaussian kernel ' + f'(FWHM = {config.workflow.smooth_kernel} mm).' 
) smooth_reference = pe.Node( Smooth(fwhm=config.workflow.smooth_kernel), - name="smooth_reference", + name='smooth_reference', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ - (gen_avg, smooth_reference, [("out_file", "in_file")]), - (smooth_reference, outputnode, [("out_file", "aslref")]), + (gen_avg, smooth_reference, [('out_file', 'in_file')]), + (smooth_reference, outputnode, [('out_file', 'aslref')]), ]) # fmt:skip else: - workflow.connect([(gen_avg, outputnode, [("out_file", "aslref")])]) + workflow.connect([(gen_avg, outputnode, [('out_file', 'aslref')])]) return workflow diff --git a/aslprep/workflows/asl/resampling.py b/aslprep/workflows/asl/resampling.py index 7cbf1192d..953c5f6a8 100644 --- a/aslprep/workflows/asl/resampling.py +++ b/aslprep/workflows/asl/resampling.py @@ -42,14 +42,14 @@ def init_asl_surf_wf( *, mem_gb: float, - surface_spaces: ty.List[str], + surface_spaces: list[str], medial_surface_nan: bool, metadata: dict, # noqa: U100 - cbf_3d: ty.List[str], - cbf_4d: ty.List[str], - att: ty.List[str], + cbf_3d: list[str], + cbf_4d: list[str], + att: list[str], output_dir: str, - name: str = "asl_surf_wf", + name: str = 'asl_surf_wf', ): """Sample functional images to FreeSurfer surfaces. @@ -124,51 +124,51 @@ def init_asl_surf_wf( timing_parameters = prepare_timing_parameters(metadata) workflow = Workflow(name=name) - out_spaces_str = ", ".join([f"*{s}*" for s in surface_spaces]) + out_spaces_str = ', '.join([f'*{s}*' for s in surface_spaces]) workflow.__desc__ = f"""\ The CBF maps were resampled onto the following surfaces (FreeSurfer reconstruction nomenclature): {out_spaces_str}. """ inputnode_fields = [ - "source_file", - "anat", - "aslref2anat_xfm", - "subject_id", - "subjects_dir", - "fsnative2t1w_xfm", + 'source_file', + 'anat', + 'aslref2anat_xfm', + 'subject_id', + 'subjects_dir', + 'fsnative2t1w_xfm', ] inputnode_fields += cbf_3d inputnode_fields += cbf_4d inputnode_fields += att inputnode = pe.Node( niu.IdentityInterface(fields=inputnode_fields), - name="inputnode", + name='inputnode', ) - itersource = pe.Node(niu.IdentityInterface(fields=["target"]), name="itersource") - itersource.iterables = [("target", surface_spaces)] + itersource = pe.Node(niu.IdentityInterface(fields=['target']), name='itersource') + itersource.iterables = [('target', surface_spaces)] - get_fsnative = pe.Node(FreeSurferSource(), name="get_fsnative", run_without_submitting=True) + get_fsnative = pe.Node(FreeSurferSource(), name='get_fsnative', run_without_submitting=True) workflow.connect([ (inputnode, get_fsnative, [ - ("subject_id", "subject_id"), - ("subjects_dir", "subjects_dir") + ('subject_id', 'subject_id'), + ('subjects_dir', 'subjects_dir') ]), ]) # fmt:skip def select_target(subject_id, space): """Get the target subject ID, given a source subject ID and a target space.""" - return subject_id if space == "fsnative" else space + return subject_id if space == 'fsnative' else space targets = pe.Node( niu.Function(function=select_target), - name="targets", + name='targets', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ - (inputnode, targets, [("subject_id", "subject_id")]), - (itersource, targets, [("target", "space")]), + (inputnode, targets, [('subject_id', 'subject_id')]), + (itersource, targets, [('target', 'space')]), ]) # fmt:skip for cbf_deriv in cbf_4d + cbf_3d + att: @@ -176,122 +176,122 @@ def select_target(subject_id, space): kwargs = {} if cbf_deriv in cbf_4d: - kwargs["dimension"] = 3 + kwargs['dimension'] = 3 if 
cbf_deriv in att: - meta = {"Units": "s"} + meta = {'Units': 's'} else: - meta = {"Units": "mL/100 g/min"} + meta = {'Units': 'mL/100 g/min'} warp_cbf_to_anat = pe.Node( ApplyTransforms( - interpolation="LanczosWindowedSinc", + interpolation='LanczosWindowedSinc', float=True, input_image_type=3, - args="-v", + args='-v', **kwargs, ), - name=f"warp_{cbf_deriv}_to_anat", + name=f'warp_{cbf_deriv}_to_anat', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ (inputnode, warp_cbf_to_anat, [ - (cbf_deriv, "input_image"), - ("anat", "reference_image"), - ("aslref2anat_xfm", "transforms"), + (cbf_deriv, 'input_image'), + ('anat', 'reference_image'), + ('aslref2anat_xfm', 'transforms'), ]), ]) # fmt:skip itk2lta = pe.Node( - ConcatenateXFMs(out_fmt="fs", inverse=True), - name=f"itk2lta_{cbf_deriv}", + ConcatenateXFMs(out_fmt='fs', inverse=True), + name=f'itk2lta_{cbf_deriv}', run_without_submitting=True, ) workflow.connect([ - (inputnode, itk2lta, [("fsnative2t1w_xfm", "in_xfms")]), - (warp_cbf_to_anat, itk2lta, [("output_image", "moving")]), - (get_fsnative, itk2lta, [("T1", "reference")]), + (inputnode, itk2lta, [('fsnative2t1w_xfm', 'in_xfms')]), + (warp_cbf_to_anat, itk2lta, [('output_image', 'moving')]), + (get_fsnative, itk2lta, [('T1', 'reference')]), ]) # fmt:skip sampler = pe.MapNode( fs.SampleToSurface( - interp_method="trilinear", - out_type="gii", + interp_method='trilinear', + out_type='gii', override_reg_subj=True, - sampling_method="average", + sampling_method='average', sampling_range=(0, 1, 0.2), - sampling_units="frac", + sampling_units='frac', ), - iterfield=["hemi"], - name=f"sampler_{cbf_deriv}", + iterfield=['hemi'], + name=f'sampler_{cbf_deriv}', mem_gb=mem_gb * 3, ) - sampler.inputs.hemi = ["lh", "rh"] + sampler.inputs.hemi = ['lh', 'rh'] workflow.connect([ (inputnode, sampler, [ - ("subjects_dir", "subjects_dir"), - ("subject_id", "subject_id"), + ('subjects_dir', 'subjects_dir'), + ('subject_id', 'subject_id'), ]), - (warp_cbf_to_anat, sampler, [("output_image", "source_file")]), - (itk2lta, sampler, [("out_inv", "reg_file")]), - (targets, sampler, [("out", "target_subject")]), + (warp_cbf_to_anat, sampler, [('output_image', 'source_file')]), + (itk2lta, sampler, [('out_inv', 'reg_file')]), + (targets, sampler, [('out', 'target_subject')]), ]) # fmt:skip update_metadata = pe.MapNode( GiftiSetAnatomicalStructure(), - iterfield=["in_file"], - name=f"update_{cbf_deriv}_metadata", + iterfield=['in_file'], + name=f'update_{cbf_deriv}_metadata', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) ds_surfs = pe.MapNode( DerivativesDataSink( base_directory=output_dir, - extension=".func.gii", + extension='.func.gii', **timing_parameters, **fields, **meta, ), - iterfield=["in_file", "hemi"], - name=f"ds_{cbf_deriv}_surfs", + iterfield=['in_file', 'hemi'], + name=f'ds_{cbf_deriv}_surfs', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - ds_surfs.inputs.hemi = ["L", "R"] + ds_surfs.inputs.hemi = ['L', 'R'] workflow.connect([ - (inputnode, ds_surfs, [("source_file", "source_file")]), - (itersource, ds_surfs, [("target", "space")]), - (update_metadata, ds_surfs, [("out_file", "in_file")]), + (inputnode, ds_surfs, [('source_file', 'source_file')]), + (itersource, ds_surfs, [('target', 'space')]), + (update_metadata, ds_surfs, [('out_file', 'in_file')]), ]) # fmt:skip # Refine if medial vertices should be NaNs medial_nans = pe.MapNode( MedialNaNs(), - iterfield=["in_file"], - name=f"medial_nans_{cbf_deriv}", + iterfield=['in_file'], + name=f'medial_nans_{cbf_deriv}', 
mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) if medial_surface_nan: # fmt: off workflow.connect([ - (inputnode, medial_nans, [("subjects_dir", "subjects_dir")]), - (sampler, medial_nans, [("out_file", "in_file")]), - (medial_nans, update_metadata, [("out_file", "in_file")]), + (inputnode, medial_nans, [('subjects_dir', 'subjects_dir')]), + (sampler, medial_nans, [('out_file', 'in_file')]), + (medial_nans, update_metadata, [('out_file', 'in_file')]), ]) # fmt: on else: - workflow.connect([(sampler, update_metadata, [("out_file", "in_file")])]) + workflow.connect([(sampler, update_metadata, [('out_file', 'in_file')])]) return workflow def init_bold_fsLR_resampling_wf( # noqa: N802 - grayord_density: ty.Literal["91k", "170k"], + grayord_density: ty.Literal['91k', '170k'], omp_nthreads: int, mem_gb: float, - name: str = "bold_fsLR_resampling_wf", + name: str = 'bold_fsLR_resampling_wf', ): """Resample BOLD time series to fsLR surface. @@ -357,7 +357,7 @@ def init_bold_fsLR_resampling_wf( # noqa: N802 from niworkflows.interfaces.utility import KeySelect from smriprep import data as smriprep_data - fslr_density = "32k" if grayord_density == "91k" else "59k" + fslr_density = '32k' if grayord_density == '91k' else '59k' workflow = Workflow(name=name) @@ -369,132 +369,132 @@ def init_bold_fsLR_resampling_wf( # noqa: N802 inputnode = pe.Node( niu.IdentityInterface( fields=[ - "bold_file", - "white", - "pial", - "midthickness", - "midthickness_fsLR", - "sphere_reg_fsLR", - "cortex_mask", - "volume_roi", + 'bold_file', + 'white', + 'pial', + 'midthickness', + 'midthickness_fsLR', + 'sphere_reg_fsLR', + 'cortex_mask', + 'volume_roi', ] ), - name="inputnode", + name='inputnode', ) hemisource = pe.Node( - niu.IdentityInterface(fields=["hemi"]), - name="hemisource", - iterables=[("hemi", ["L", "R"])], + niu.IdentityInterface(fields=['hemi']), + name='hemisource', + iterables=[('hemi', ['L', 'R'])], ) joinnode = pe.JoinNode( - niu.IdentityInterface(fields=["bold_fsLR"]), - name="joinnode", - joinsource="hemisource", + niu.IdentityInterface(fields=['bold_fsLR']), + name='joinnode', + joinsource='hemisource', ) outputnode = pe.Node( - niu.IdentityInterface(fields=["bold_fsLR"]), - name="outputnode", + niu.IdentityInterface(fields=['bold_fsLR']), + name='outputnode', ) # select white, midthickness and pial surfaces based on hemi select_surfaces = pe.Node( KeySelect( fields=[ - "white", - "pial", - "midthickness", - "midthickness_fsLR", - "sphere_reg_fsLR", - "template_sphere", - "cortex_mask", - "template_roi", + 'white', + 'pial', + 'midthickness', + 'midthickness_fsLR', + 'sphere_reg_fsLR', + 'template_sphere', + 'cortex_mask', + 'template_roi', ], - keys=["L", "R"], + keys=['L', 'R'], ), - name="select_surfaces", + name='select_surfaces', run_without_submitting=True, ) select_surfaces.inputs.template_sphere = [ str(sphere) for sphere in tf.get( - template="fsLR", + template='fsLR', density=fslr_density, - suffix="sphere", + suffix='sphere', space=None, - extension=".surf.gii", + extension='.surf.gii', ) ] - atlases = smriprep_data.load_resource("atlases") + atlases = smriprep_data.load_resource('atlases') select_surfaces.inputs.template_roi = [ - str(atlases / f"L.atlasroi.{fslr_density}_fs_LR.shape.gii"), - str(atlases / f"R.atlasroi.{fslr_density}_fs_LR.shape.gii"), + str(atlases / f'L.atlasroi.{fslr_density}_fs_LR.shape.gii'), + str(atlases / f'R.atlasroi.{fslr_density}_fs_LR.shape.gii'), ] # RibbonVolumeToSurfaceMapping.sh # Line 85 thru ... 
volume_to_surface = pe.Node( - VolumeToSurfaceMapping(method="ribbon-constrained"), - name="volume_to_surface", + VolumeToSurfaceMapping(method='ribbon-constrained'), + name='volume_to_surface', mem_gb=mem_gb * 3, n_procs=omp_nthreads, ) metric_dilate = pe.Node( MetricDilate(distance=10, nearest=True), - name="metric_dilate", + name='metric_dilate', mem_gb=1, n_procs=omp_nthreads, ) - mask_native = pe.Node(MetricMask(), name="mask_native") + mask_native = pe.Node(MetricMask(), name='mask_native') resample_to_fsLR = pe.Node( - MetricResample(method="ADAP_BARY_AREA", area_surfs=True), - name="resample_to_fsLR", + MetricResample(method='ADAP_BARY_AREA', area_surfs=True), + name='resample_to_fsLR', mem_gb=1, n_procs=omp_nthreads, ) # ... line 89 - mask_fsLR = pe.Node(MetricMask(), name="mask_fsLR") + mask_fsLR = pe.Node(MetricMask(), name='mask_fsLR') workflow.connect([ (inputnode, select_surfaces, [ - ("white", "white"), - ("pial", "pial"), - ("midthickness", "midthickness"), - ("midthickness_fsLR", "midthickness_fsLR"), - ("sphere_reg_fsLR", "sphere_reg_fsLR"), - ("cortex_mask", "cortex_mask"), + ('white', 'white'), + ('pial', 'pial'), + ('midthickness', 'midthickness'), + ('midthickness_fsLR', 'midthickness_fsLR'), + ('sphere_reg_fsLR', 'sphere_reg_fsLR'), + ('cortex_mask', 'cortex_mask'), ]), - (hemisource, select_surfaces, [("hemi", "key")]), + (hemisource, select_surfaces, [('hemi', 'key')]), # Resample BOLD to native surface, dilate and mask (inputnode, volume_to_surface, [ - ("bold_file", "volume_file"), - ("volume_roi", "volume_roi"), + ('bold_file', 'volume_file'), + ('volume_roi', 'volume_roi'), ]), (select_surfaces, volume_to_surface, [ - ("midthickness", "surface_file"), - ("white", "inner_surface"), - ("pial", "outer_surface"), + ('midthickness', 'surface_file'), + ('white', 'inner_surface'), + ('pial', 'outer_surface'), ]), - (select_surfaces, metric_dilate, [("midthickness", "surf_file")]), - (select_surfaces, mask_native, [("cortex_mask", "mask")]), - (volume_to_surface, metric_dilate, [("out_file", "in_file")]), - (metric_dilate, mask_native, [("out_file", "in_file")]), + (select_surfaces, metric_dilate, [('midthickness', 'surf_file')]), + (select_surfaces, mask_native, [('cortex_mask', 'mask')]), + (volume_to_surface, metric_dilate, [('out_file', 'in_file')]), + (metric_dilate, mask_native, [('out_file', 'in_file')]), # Resample BOLD to fsLR and mask (select_surfaces, resample_to_fsLR, [ - ("sphere_reg_fsLR", "current_sphere"), - ("template_sphere", "new_sphere"), - ("midthickness", "current_area"), - ("midthickness_fsLR", "new_area"), - ("cortex_mask", "roi_metric"), + ('sphere_reg_fsLR', 'current_sphere'), + ('template_sphere', 'new_sphere'), + ('midthickness', 'current_area'), + ('midthickness_fsLR', 'new_area'), + ('cortex_mask', 'roi_metric'), ]), - (mask_native, resample_to_fsLR, [("out_file", "in_file")]), - (select_surfaces, mask_fsLR, [("template_roi", "mask")]), - (resample_to_fsLR, mask_fsLR, [("out_file", "in_file")]), + (mask_native, resample_to_fsLR, [('out_file', 'in_file')]), + (select_surfaces, mask_fsLR, [('template_roi', 'mask')]), + (resample_to_fsLR, mask_fsLR, [('out_file', 'in_file')]), # Output - (mask_fsLR, joinnode, [("out_file", "bold_fsLR")]), - (joinnode, outputnode, [("bold_fsLR", "bold_fsLR")]), + (mask_fsLR, joinnode, [('out_file', 'bold_fsLR')]), + (joinnode, outputnode, [('bold_fsLR', 'bold_fsLR')]), ]) # fmt:skip return workflow diff --git a/aslprep/workflows/base.py b/aslprep/workflows/base.py index 5f025c3dd..a61284523 100644 --- 
a/aslprep/workflows/base.py +++ b/aslprep/workflows/base.py @@ -46,7 +46,7 @@ def init_aslprep_wf(): ver = Version(config.environment.version) - aslprep_wf = Workflow(name=f"aslprep_{ver.major}_{ver.minor}_wf") + aslprep_wf = Workflow(name=f'aslprep_{ver.major}_{ver.minor}_wf') aslprep_wf.base_dir = config.execution.work_dir freesurfer = config.workflow.run_reconall @@ -54,9 +54,9 @@ def init_aslprep_wf(): fsdir = pe.Node( BIDSFreeSurferDir( derivatives=config.execution.output_dir, - freesurfer_home=os.getenv("FREESURFER_HOME"), + freesurfer_home=os.getenv('FREESURFER_HOME'), spaces=config.workflow.spaces.get_fs_spaces(), - minimum_fs_version="7.0.0", + minimum_fs_version='7.0.0', ), name=f"fsdir_run_{config.execution.run_uuid.replace('-', '_')}", run_without_submitting=True, @@ -67,22 +67,22 @@ def init_aslprep_wf(): for subject_id in config.execution.participant_label: single_subject_wf = init_single_subject_wf(subject_id) - single_subject_wf.config["execution"]["crashdump_dir"] = str( - config.execution.aslprep_dir / f"sub-{subject_id}" / "log" / config.execution.run_uuid + single_subject_wf.config['execution']['crashdump_dir'] = str( + config.execution.aslprep_dir / f'sub-{subject_id}' / 'log' / config.execution.run_uuid ) for node in single_subject_wf._get_all_nodes(): node.config = deepcopy(single_subject_wf.config) if freesurfer: - aslprep_wf.connect(fsdir, "subjects_dir", single_subject_wf, "inputnode.subjects_dir") + aslprep_wf.connect(fsdir, 'subjects_dir', single_subject_wf, 'inputnode.subjects_dir') else: aslprep_wf.add_nodes([single_subject_wf]) # Dump a copy of the config file into the log directory log_dir = ( - config.execution.aslprep_dir / f"sub-{subject_id}" / "log" / config.execution.run_uuid + config.execution.aslprep_dir / f'sub-{subject_id}' / 'log' / config.execution.run_uuid ) log_dir.mkdir(exist_ok=True, parents=True) - config.to_filename(log_dir / "aslprep.toml") + config.to_filename(log_dir / 'aslprep.toml') return aslprep_wf @@ -139,7 +139,7 @@ def init_single_subject_wf(subject_id: str): from aslprep.utils.bids import collect_data - workflow = Workflow(name=f"sub_{subject_id}_wf") + workflow = Workflow(name=f'sub_{subject_id}_wf') workflow.__desc__ = f""" ### Arterial Spin-Labeled MRI Preprocessing and Cerebral Blood Flow Computation @@ -173,20 +173,20 @@ def init_single_subject_wf(subject_id: str): bids_filters=config.execution.bids_filters, ) - if "flair" in config.workflow.ignore: - subject_data["flair"] = [] + if 'flair' in config.workflow.ignore: + subject_data['flair'] = [] - if "t2w" in config.workflow.ignore: - subject_data["t2w"] = [] + if 't2w' in config.workflow.ignore: + subject_data['t2w'] = [] anat_only = config.workflow.anat_only # Make sure we always go through these two checks - if not anat_only and not subject_data["asl"]: + if not anat_only and not subject_data['asl']: raise RuntimeError( - f"No ASL images found for participant {subject_id}. All workflows require ASL images." + f'No ASL images found for participant {subject_id}. All workflows require ASL images.' ) - if subject_data["roi"]: + if subject_data['roi']: warnings.warn( f"Lesion mask {subject_data['roi']} found. " "Future versions of fMRIPrep will use alternative conventions. 
" @@ -202,7 +202,7 @@ def init_single_subject_wf(subject_id: str): from smriprep.utils.bids import collect_derivatives as collect_anat_derivatives std_spaces = spaces.get_spaces(nonstandard=False, dim=(3,)) - std_spaces.append("fsnative") + std_spaces.append('fsnative') for deriv_dir in config.execution.derivatives: anatomical_cache.update( collect_anat_derivatives( @@ -212,7 +212,7 @@ def init_single_subject_wf(subject_id: str): ) ) - inputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]), name="inputnode") + inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']), name='inputnode') bidssrc = pe.Node( BIDSDataGrabber( @@ -220,12 +220,12 @@ def init_single_subject_wf(subject_id: str): anat_only=config.workflow.anat_only, subject_id=subject_id, ), - name="bidssrc", + name='bidssrc', ) bids_info = pe.Node( BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False), - name="bids_info", + name='bids_info', ) summary = pe.Node( @@ -233,35 +233,35 @@ def init_single_subject_wf(subject_id: str): std_spaces=spaces.get_spaces(nonstandard=False), nstd_spaces=spaces.get_spaces(standard=False), ), - name="summary", + name='summary', run_without_submitting=True, ) about = pe.Node( - AboutSummary(version=config.environment.version, command=" ".join(sys.argv)), - name="about", + AboutSummary(version=config.environment.version, command=' '.join(sys.argv)), + name='about', run_without_submitting=True, ) ds_report_summary = pe.Node( DerivativesDataSink( base_directory=config.execution.aslprep_dir, - desc="summary", - datatype="figures", - dismiss_entities=("echo",), + desc='summary', + datatype='figures', + dismiss_entities=('echo',), ), - name="ds_report_summary", + name='ds_report_summary', run_without_submitting=True, ) ds_report_about = pe.Node( DerivativesDataSink( base_directory=config.execution.aslprep_dir, - desc="about", - datatype="figures", - dismiss_entities=("echo",), + desc='about', + datatype='figures', + dismiss_entities=('echo',), ), - name="ds_report_about", + name='ds_report_about', run_without_submitting=True, ) @@ -277,9 +277,9 @@ def init_single_subject_wf(subject_id: str): hires=config.workflow.hires, longitudinal=config.workflow.longitudinal, msm_sulc=msm_sulc, - t1w=subject_data["t1w"], - t2w=subject_data["t2w"], - flair=subject_data["flair"], + t1w=subject_data['t1w'], + t2w=subject_data['t2w'], + flair=subject_data['flair'], skull_strip_mode=config.workflow.skull_strip_t1w, skull_strip_template=Reference.from_string(config.workflow.skull_strip_template)[0], spaces=spaces, @@ -290,83 +290,83 @@ def init_single_subject_wf(subject_id: str): ) workflow.connect([ - (inputnode, anat_fit_wf, [("subjects_dir", "inputnode.subjects_dir")]), - (bidssrc, bids_info, [(("t1w", fix_multi_T1w_source_name), "in_file")]), + (inputnode, anat_fit_wf, [('subjects_dir', 'inputnode.subjects_dir')]), + (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')]), (bidssrc, anat_fit_wf, [ - ("t1w", "inputnode.t1w"), - ("t2w", "inputnode.t2w"), - ("roi", "inputnode.roi"), - ("flair", "inputnode.flair"), + ('t1w', 'inputnode.t1w'), + ('t2w', 'inputnode.t2w'), + ('roi', 'inputnode.roi'), + ('flair', 'inputnode.flair'), ]), - (bids_info, anat_fit_wf, [(("subject", _prefix), "inputnode.subject_id")]), + (bids_info, anat_fit_wf, [(('subject', _prefix), 'inputnode.subject_id')]), # Reporting connections - (inputnode, summary, [("subjects_dir", "subjects_dir")]), + (inputnode, summary, [('subjects_dir', 'subjects_dir')]), (bidssrc, summary, [ - ("t1w", "t1w"), - ("t2w", 
"t2w"), - ("asl", "asl"), + ('t1w', 't1w'), + ('t2w', 't2w'), + ('asl', 'asl'), ]), - (bids_info, summary, [("subject", "subject_id")]), - (bidssrc, ds_report_summary, [(("t1w", fix_multi_T1w_source_name), "source_file")]), - (bidssrc, ds_report_about, [(("t1w", fix_multi_T1w_source_name), "source_file")]), - (summary, ds_report_summary, [("out_report", "in_file")]), - (about, ds_report_about, [("out_report", "in_file")]), + (bids_info, summary, [('subject', 'subject_id')]), + (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name), 'source_file')]), + (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name), 'source_file')]), + (summary, ds_report_summary, [('out_report', 'in_file')]), + (about, ds_report_about, [('out_report', 'in_file')]), ]) # fmt:skip # Set up the template iterator once, if used template_iterator_wf = None select_MNI2009c_xfm = None - if config.workflow.level == "full": + if config.workflow.level == 'full': if spaces.cached.get_spaces(nonstandard=False, dim=(3,)): template_iterator_wf = init_template_iterator_wf(spaces=spaces) ds_std_volumes_wf = init_ds_anat_volumes_wf( bids_root=bids_root, output_dir=aslprep_dir, - name="ds_std_volumes_wf", + name='ds_std_volumes_wf', ) workflow.connect([ (anat_fit_wf, template_iterator_wf, [ - ("outputnode.template", "inputnode.template"), - ("outputnode.anat2std_xfm", "inputnode.anat2std_xfm"), + ('outputnode.template', 'inputnode.template'), + ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'), ]), (anat_fit_wf, ds_std_volumes_wf, [ - ("outputnode.t1w_valid_list", "inputnode.source_files"), - ("outputnode.t1w_preproc", "inputnode.t1w_preproc"), - ("outputnode.t1w_mask", "inputnode.t1w_mask"), - ("outputnode.t1w_dseg", "inputnode.t1w_dseg"), - ("outputnode.t1w_tpms", "inputnode.t1w_tpms"), + ('outputnode.t1w_valid_list', 'inputnode.source_files'), + ('outputnode.t1w_preproc', 'inputnode.t1w_preproc'), + ('outputnode.t1w_mask', 'inputnode.t1w_mask'), + ('outputnode.t1w_dseg', 'inputnode.t1w_dseg'), + ('outputnode.t1w_tpms', 'inputnode.t1w_tpms'), ]), (template_iterator_wf, ds_std_volumes_wf, [ - ("outputnode.std_t1w", "inputnode.ref_file"), - ("outputnode.anat2std_xfm", "inputnode.anat2std_xfm"), - ("outputnode.space", "inputnode.space"), - ("outputnode.cohort", "inputnode.cohort"), - ("outputnode.resolution", "inputnode.resolution"), + ('outputnode.std_t1w', 'inputnode.ref_file'), + ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'), + ('outputnode.space', 'inputnode.space'), + ('outputnode.cohort', 'inputnode.cohort'), + ('outputnode.resolution', 'inputnode.resolution'), ]), ]) # fmt:skip - if "MNI152NLin2009cAsym" in spaces.get_spaces(): + if 'MNI152NLin2009cAsym' in spaces.get_spaces(): select_MNI2009c_xfm = pe.Node( - KeySelect(fields=["std2anat_xfm"], key="MNI152NLin2009cAsym"), - name="select_MNI2009c_xfm", + KeySelect(fields=['std2anat_xfm'], key='MNI152NLin2009cAsym'), + name='select_MNI2009c_xfm', run_without_submitting=True, ) workflow.connect([ (anat_fit_wf, select_MNI2009c_xfm, [ - ("outputnode.std2anat_xfm", "std2anat_xfm"), - ("outputnode.template", "keys"), + ('outputnode.std2anat_xfm', 'std2anat_xfm'), + ('outputnode.template', 'keys'), ]), ]) # fmt:skip select_MNI2009c_xfm_fw = pe.Node( - KeySelect(fields=["anat2std_xfm"], key="MNI152NLin2009cAsym"), - name="select_MNI2009c_xfm_fw", + KeySelect(fields=['anat2std_xfm'], key='MNI152NLin2009cAsym'), + name='select_MNI2009c_xfm_fw', run_without_submitting=True, ) workflow.connect([ (anat_fit_wf, select_MNI2009c_xfm_fw, [ - 
("outputnode.anat2std_xfm", "anat2std_xfm"), - ("outputnode.template", "keys"), + ('outputnode.anat2std_xfm', 'anat2std_xfm'), + ('outputnode.template', 'keys'), ]), ]) # fmt:skip @@ -378,28 +378,28 @@ def init_single_subject_wf(subject_id: str): from smriprep.interfaces.templateflow import TemplateFlowSelect ref = Reference( - "MNI152NLin6Asym", - {"res": 2 if config.workflow.cifti_output == "91k" else 1}, + 'MNI152NLin6Asym', + {'res': 2 if config.workflow.cifti_output == '91k' else 1}, ) select_MNI6_xfm = pe.Node( - KeySelect(fields=["anat2std_xfm"], key=ref.fullname), - name="select_MNI6", + KeySelect(fields=['anat2std_xfm'], key=ref.fullname), + name='select_MNI6', run_without_submitting=True, ) select_MNI6_tpl = pe.Node( - TemplateFlowSelect(template=ref.fullname, resolution=ref.spec["res"]), - name="select_MNI6_tpl", + TemplateFlowSelect(template=ref.fullname, resolution=ref.spec['res']), + name='select_MNI6_tpl', ) workflow.connect([ (anat_fit_wf, select_MNI6_xfm, [ - ("outputnode.anat2std_xfm", "anat2std_xfm"), - ("outputnode.template", "keys"), + ('outputnode.anat2std_xfm', 'anat2std_xfm'), + ('outputnode.template', 'keys'), ]), ]) # fmt:skip # Create CIFTI morphometrics - curv_wf = init_gifti_morphometrics_wf(morphometrics=["curv"], name="curv_wf") + curv_wf = init_gifti_morphometrics_wf(morphometrics=['curv'], name='curv_wf') hcp_morphometrics_wf = init_hcp_morphometrics_wf(omp_nthreads=omp_nthreads) morph_grayords_wf = init_morph_grayords_wf( grayord_density=config.workflow.cifti_output, @@ -411,57 +411,57 @@ def init_single_subject_wf(subject_id: str): ds_grayord_metrics_wf = init_ds_grayord_metrics_wf( bids_root=bids_root, output_dir=aslprep_dir, - metrics=["curv", "thickness", "sulc"], + metrics=['curv', 'thickness', 'sulc'], cifti_output=config.workflow.cifti_output, ) workflow.connect([ (anat_fit_wf, curv_wf, [ - ("outputnode.subject_id", "inputnode.subject_id"), - ("outputnode.subjects_dir", "inputnode.subjects_dir"), + ('outputnode.subject_id', 'inputnode.subject_id'), + ('outputnode.subjects_dir', 'inputnode.subjects_dir'), ]), (anat_fit_wf, hcp_morphometrics_wf, [ - ("outputnode.subject_id", "inputnode.subject_id"), - ("outputnode.thickness", "inputnode.thickness"), - ("outputnode.sulc", "inputnode.sulc"), - ("outputnode.midthickness", "inputnode.midthickness"), + ('outputnode.subject_id', 'inputnode.subject_id'), + ('outputnode.thickness', 'inputnode.thickness'), + ('outputnode.sulc', 'inputnode.sulc'), + ('outputnode.midthickness', 'inputnode.midthickness'), ]), (curv_wf, hcp_morphometrics_wf, [ - ("outputnode.curv", "inputnode.curv"), + ('outputnode.curv', 'inputnode.curv'), ]), (anat_fit_wf, resample_midthickness_wf, [ - ("outputnode.midthickness", "inputnode.midthickness"), + ('outputnode.midthickness', 'inputnode.midthickness'), ( f"outputnode.sphere_reg_{'msm' if msm_sulc else 'fsLR'}", - "inputnode.sphere_reg_fsLR", + 'inputnode.sphere_reg_fsLR', ), ]), (anat_fit_wf, morph_grayords_wf, [ - ("outputnode.midthickness", "inputnode.midthickness"), + ('outputnode.midthickness', 'inputnode.midthickness'), ( f'outputnode.sphere_reg_{"msm" if msm_sulc else "fsLR"}', - "inputnode.sphere_reg_fsLR", + 'inputnode.sphere_reg_fsLR', ), ]), (hcp_morphometrics_wf, morph_grayords_wf, [ - ("outputnode.curv", "inputnode.curv"), - ("outputnode.thickness", "inputnode.thickness"), - ("outputnode.sulc", "inputnode.sulc"), - ("outputnode.roi", "inputnode.roi"), + ('outputnode.curv', 'inputnode.curv'), + ('outputnode.thickness', 'inputnode.thickness'), + ('outputnode.sulc', 
'inputnode.sulc'), + ('outputnode.roi', 'inputnode.roi'), ]), (resample_midthickness_wf, morph_grayords_wf, [ - ("outputnode.midthickness_fsLR", "inputnode.midthickness_fsLR"), + ('outputnode.midthickness_fsLR', 'inputnode.midthickness_fsLR'), ]), (anat_fit_wf, ds_grayord_metrics_wf, [ - ("outputnode.t1w_valid_list", "inputnode.source_files"), + ('outputnode.t1w_valid_list', 'inputnode.source_files'), ]), (morph_grayords_wf, ds_grayord_metrics_wf, [ - ("outputnode.curv_fsLR", "inputnode.curv"), - ("outputnode.curv_metadata", "inputnode.curv_metadata"), - ("outputnode.thickness_fsLR", "inputnode.thickness"), - ("outputnode.thickness_metadata", "inputnode.thickness_metadata"), - ("outputnode.sulc_fsLR", "inputnode.sulc"), - ("outputnode.sulc_metadata", "inputnode.sulc_metadata"), + ('outputnode.curv_fsLR', 'inputnode.curv'), + ('outputnode.curv_metadata', 'inputnode.curv_metadata'), + ('outputnode.thickness_fsLR', 'inputnode.thickness'), + ('outputnode.thickness_metadata', 'inputnode.thickness_metadata'), + ('outputnode.sulc_fsLR', 'inputnode.sulc'), + ('outputnode.sulc_metadata', 'inputnode.sulc_metadata'), ]), ]) # fmt:skip @@ -471,25 +471,25 @@ def init_single_subject_wf(subject_id: str): fmap_estimators, estimator_map = map_fieldmap_estimation( layout=config.execution.layout, subject_id=subject_id, - bold_data=subject_data["asl"], - ignore_fieldmaps="fieldmaps" in config.workflow.ignore, + bold_data=subject_data['asl'], + ignore_fieldmaps='fieldmaps' in config.workflow.ignore, use_syn=config.workflow.use_syn_sdc, force_syn=config.workflow.force_syn, - filters=config.execution.get().get("bids_filters", {}).get("fmap"), + filters=config.execution.get().get('bids_filters', {}).get('fmap'), ) if fmap_estimators: config.loggers.workflow.info( - "B0 field inhomogeneity map will be estimated with the following " - f"{len(fmap_estimators)} estimator(s): " - f"{[e.method for e in fmap_estimators]}." + 'B0 field inhomogeneity map will be estimated with the following ' + f'{len(fmap_estimators)} estimator(s): ' + f'{[e.method for e in fmap_estimators]}.' 
) from sdcflows import fieldmaps as fm from sdcflows.workflows.base import init_fmap_preproc_wf fmap_wf = init_fmap_preproc_wf( - debug="fieldmaps" in config.execution.debug, + debug='fieldmaps' in config.execution.debug, estimators=fmap_estimators, omp_nthreads=omp_nthreads, output_dir=aslprep_dir, @@ -505,19 +505,19 @@ def init_single_subject_wf(subject_id: str): # Overwrite ``out_path_base`` of sdcflows's DataSinks for node in fmap_wf.list_node_names(): - if node.split(".")[-1].startswith("ds_"): - fmap_wf.get_node(node).interface.out_path_base = "" + if node.split('.')[-1].startswith('ds_'): + fmap_wf.get_node(node).interface.out_path_base = '' fmap_select_std = pe.Node( - KeySelect(fields=["std2anat_xfm"], key="MNI152NLin2009cAsym"), - name="fmap_select_std", + KeySelect(fields=['std2anat_xfm'], key='MNI152NLin2009cAsym'), + name='fmap_select_std', run_without_submitting=True, ) if any(estimator.method == fm.EstimatorType.ANAT for estimator in fmap_estimators): workflow.connect([ (anat_fit_wf, fmap_select_std, [ - ("outputnode.std2anat_xfm", "std2anat_xfm"), - ("outputnode.template", "keys")]), + ('outputnode.std2anat_xfm', 'std2anat_xfm'), + ('outputnode.template', 'keys')]), ]) # fmt:skip for estimator in fmap_estimators: @@ -535,13 +535,13 @@ def init_single_subject_wf(subject_id: str): if estimator.method == fm.EstimatorType.PEPOLAR: if len(suffixes) == 2 and all( - suf in ("epi", "m0scan", "sbref") for suf in suffixes + suf in ('epi', 'm0scan', 'sbref') for suf in suffixes ): - wf_inputs = getattr(fmap_wf.inputs, f"in_{estimator.bids_id}") + wf_inputs = getattr(fmap_wf.inputs, f'in_{estimator.bids_id}') wf_inputs.in_data = [str(s.path) for s in estimator.sources] wf_inputs.metadata = [s.metadata for s in estimator.sources] else: - raise NotImplementedError("Sophisticated PEPOLAR schemes are unsupported.") + raise NotImplementedError('Sophisticated PEPOLAR schemes are unsupported.') elif estimator.method == fm.EstimatorType.ANAT: from sdcflows.workflows.fit.syn import init_syn_preprocessing_wf @@ -549,38 +549,38 @@ def init_single_subject_wf(subject_id: str): sources = [ str(s.path) for s in estimator.sources - if s.suffix in ("asl", "m0scan", "sbref") + if s.suffix in ('asl', 'm0scan', 'sbref') ] source_meta = [ - s.metadata for s in estimator.sources if s.suffix in ("asl", "m0scan", "sbref") + s.metadata for s in estimator.sources if s.suffix in ('asl', 'm0scan', 'sbref') ] syn_preprocessing_wf = init_syn_preprocessing_wf( omp_nthreads=config.nipype.omp_nthreads, debug=config.execution.sloppy, auto_bold_nss=False, # I don't trust NSS estimation on ASL data t1w_inversion=False, - name=f"syn_preprocessing_{estimator.bids_id}", + name=f'syn_preprocessing_{estimator.bids_id}', ) syn_preprocessing_wf.inputs.inputnode.in_epis = sources syn_preprocessing_wf.inputs.inputnode.in_meta = source_meta # Use all volumes of each run. 
- run_lengths = [nb.load(f).shape[3] for f in subject_data["asl"]] + run_lengths = [nb.load(f).shape[3] for f in subject_data['asl']] syn_preprocessing_wf.inputs.inputnode.t_masks = [[True] * rl for rl in run_lengths] workflow.connect([ (anat_fit_wf, syn_preprocessing_wf, [ - ("outputnode.t1w_preproc", "inputnode.in_anat"), - ("outputnode.t1w_mask", "inputnode.mask_anat"), + ('outputnode.t1w_preproc', 'inputnode.in_anat'), + ('outputnode.t1w_mask', 'inputnode.mask_anat'), ]), (fmap_select_std, syn_preprocessing_wf, [ - ("std2anat_xfm", "inputnode.std2anat_xfm"), + ('std2anat_xfm', 'inputnode.std2anat_xfm'), ]), (syn_preprocessing_wf, fmap_wf, [ - ("outputnode.epi_ref", f"in_{estimator.bids_id}.epi_ref"), - ("outputnode.epi_mask", f"in_{estimator.bids_id}.epi_mask"), - ("outputnode.anat_ref", f"in_{estimator.bids_id}.anat_ref"), - ("outputnode.anat_mask", f"in_{estimator.bids_id}.anat_mask"), - ("outputnode.sd_prior", f"in_{estimator.bids_id}.sd_prior"), + ('outputnode.epi_ref', f'in_{estimator.bids_id}.epi_ref'), + ('outputnode.epi_mask', f'in_{estimator.bids_id}.epi_mask'), + ('outputnode.anat_ref', f'in_{estimator.bids_id}.anat_ref'), + ('outputnode.anat_mask', f'in_{estimator.bids_id}.anat_mask'), + ('outputnode.sd_prior', f'in_{estimator.bids_id}.sd_prior'), ]), ]) # fmt:skip @@ -594,7 +594,7 @@ def init_single_subject_wf(subject_id: str): """ asl_preproc_wfs = [] - for asl_file in subject_data["asl"]: + for asl_file in subject_data['asl']: fieldmap_id = estimator_map.get(asl_file) functional_cache = {} @@ -623,25 +623,25 @@ def init_single_subject_wf(subject_id: str): if asl_wf is None: continue - asl_wf.__desc__ = asl_pre_desc + (asl_wf.__desc__ or "") + asl_wf.__desc__ = asl_pre_desc + (asl_wf.__desc__ or '') workflow.connect([ (anat_fit_wf, asl_wf, [ - ("outputnode.t1w_preproc", "inputnode.t1w_preproc"), - ("outputnode.t1w_mask", "inputnode.t1w_mask"), - ("outputnode.t1w_dseg", "inputnode.t1w_dseg"), - ("outputnode.t1w_tpms", "inputnode.t1w_tpms"), + ('outputnode.t1w_preproc', 'inputnode.t1w_preproc'), + ('outputnode.t1w_mask', 'inputnode.t1w_mask'), + ('outputnode.t1w_dseg', 'inputnode.t1w_dseg'), + ('outputnode.t1w_tpms', 'inputnode.t1w_tpms'), # Undefined if --fs-no-reconall, but this is safe - ("outputnode.subjects_dir", "inputnode.subjects_dir"), - ("outputnode.subject_id", "inputnode.subject_id"), - ("outputnode.anat_ribbon", "inputnode.anat_ribbon"), - ("outputnode.fsnative2t1w_xfm", "inputnode.fsnative2t1w_xfm"), - ("outputnode.white", "inputnode.white"), - ("outputnode.pial", "inputnode.pial"), - ("outputnode.midthickness", "inputnode.midthickness"), + ('outputnode.subjects_dir', 'inputnode.subjects_dir'), + ('outputnode.subject_id', 'inputnode.subject_id'), + ('outputnode.anat_ribbon', 'inputnode.anat_ribbon'), + ('outputnode.fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm'), + ('outputnode.white', 'inputnode.white'), + ('outputnode.pial', 'inputnode.pial'), + ('outputnode.midthickness', 'inputnode.midthickness'), ( f'outputnode.sphere_reg_{"msm" if msm_sulc else "fsLR"}', - "inputnode.sphere_reg_fsLR", + 'inputnode.sphere_reg_fsLR', ), ]), ]) # fmt:skip @@ -649,35 +649,35 @@ def init_single_subject_wf(subject_id: str): if fieldmap_id: workflow.connect([ (fmap_wf, asl_wf, [ - ("outputnode.fmap", "inputnode.fmap"), - ("outputnode.fmap_ref", "inputnode.fmap_ref"), - ("outputnode.fmap_coeff", "inputnode.fmap_coeff"), - ("outputnode.fmap_mask", "inputnode.fmap_mask"), - ("outputnode.fmap_id", "inputnode.fmap_id"), - ("outputnode.method", "inputnode.sdc_method"), + 
('outputnode.fmap', 'inputnode.fmap'), + ('outputnode.fmap_ref', 'inputnode.fmap_ref'), + ('outputnode.fmap_coeff', 'inputnode.fmap_coeff'), + ('outputnode.fmap_mask', 'inputnode.fmap_mask'), + ('outputnode.fmap_id', 'inputnode.fmap_id'), + ('outputnode.method', 'inputnode.sdc_method'), ]), ]) # fmt:skip - if config.workflow.level == "full": + if config.workflow.level == 'full': if template_iterator_wf is not None: workflow.connect([ (template_iterator_wf, asl_wf, [ - ("outputnode.anat2std_xfm", "inputnode.anat2std_xfm"), - ("outputnode.space", "inputnode.std_space"), - ("outputnode.resolution", "inputnode.std_resolution"), - ("outputnode.cohort", "inputnode.std_cohort"), - ("outputnode.std_t1w", "inputnode.std_t1w"), - ("outputnode.std_mask", "inputnode.std_mask"), + ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'), + ('outputnode.space', 'inputnode.std_space'), + ('outputnode.resolution', 'inputnode.std_resolution'), + ('outputnode.cohort', 'inputnode.std_cohort'), + ('outputnode.std_t1w', 'inputnode.std_t1w'), + ('outputnode.std_mask', 'inputnode.std_mask'), ]), ]) # fmt:skip if select_MNI2009c_xfm is not None: workflow.connect([ (select_MNI2009c_xfm, asl_wf, [ - ("std2anat_xfm", "inputnode.mni2009c2anat_xfm"), + ('std2anat_xfm', 'inputnode.mni2009c2anat_xfm'), ]), (select_MNI2009c_xfm_fw, asl_wf, [ - ("anat2std_xfm", "inputnode.anat2mni2009c_xfm"), + ('anat2std_xfm', 'inputnode.anat2mni2009c_xfm'), ]), ]) # fmt:skip @@ -687,11 +687,11 @@ def init_single_subject_wf(subject_id: str): # want MNI152NLin6Asym outputs, but we'll live with it. if config.workflow.cifti_output: workflow.connect([ - (select_MNI6_xfm, asl_wf, [("anat2std_xfm", "inputnode.anat2mni6_xfm")]), - (select_MNI6_tpl, asl_wf, [("brain_mask", "inputnode.mni6_mask")]), - (hcp_morphometrics_wf, asl_wf, [("outputnode.roi", "inputnode.cortex_mask")]), + (select_MNI6_xfm, asl_wf, [('anat2std_xfm', 'inputnode.anat2mni6_xfm')]), + (select_MNI6_tpl, asl_wf, [('brain_mask', 'inputnode.mni6_mask')]), + (hcp_morphometrics_wf, asl_wf, [('outputnode.roi', 'inputnode.cortex_mask')]), (resample_midthickness_wf, asl_wf, [ - ("outputnode.midthickness_fsLR", "inputnode.midthickness_fsLR"), + ('outputnode.midthickness_fsLR', 'inputnode.midthickness_fsLR'), ]), ]) # fmt:skip @@ -732,11 +732,11 @@ def map_fieldmap_estimation( if not fmap_estimators: if use_syn: message = ( - "Fieldmap-less (SyN) estimation was requested, but PhaseEncodingDirection " - "information appears to be absent." + 'Fieldmap-less (SyN) estimation was requested, but PhaseEncodingDirection ' + 'information appears to be absent.' 
) config.loggers.workflow.error(message) - if use_syn == "error": + if use_syn == 'error': raise ValueError(message) return [], {} @@ -780,14 +780,14 @@ def map_fieldmap_estimation( def clean_datasinks(workflow: pe.Workflow) -> pe.Workflow: """Overwrite ``out_path_base`` of dependency pipelines' DataSinks.""" for node in workflow.list_node_names(): - if node.split(".")[-1].startswith("ds_"): - workflow.get_node(node).interface.out_path_base = "" + if node.split('.')[-1].startswith('ds_'): + workflow.get_node(node).interface.out_path_base = '' return workflow def get_estimator(layout, fname): """Get estimator.""" - field_source = layout.get_metadata(fname).get("B0FieldSource") + field_source = layout.get_metadata(fname).get('B0FieldSource') if isinstance(field_source, str): field_source = (field_source,) @@ -798,7 +798,7 @@ def get_estimator(layout, fname): from sdcflows.fieldmaps import get_identifier # Fallback to IntendedFor - intended_rel = re.sub(r"^sub-[a-zA-Z0-9]*/", "", str(Path(fname).relative_to(layout.root))) + intended_rel = re.sub(r'^sub-[a-zA-Z0-9]*/', '', str(Path(fname).relative_to(layout.root))) field_source = get_identifier(intended_rel) return field_source diff --git a/docs/conf.py b/docs/conf.py index e6441fe07..4e450ff20 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -27,13 +27,13 @@ from packaging import version as pver from sphinx import __version__ as sphinxversion -sys.path.insert(0, os.path.abspath("..")) +sys.path.insert(0, os.path.abspath('..')) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. # If the directory is relative to the documentation root, use os.path.abspath to make it absolute, # like shown here. -sys.path.append(os.path.abspath("sphinxext")) +sys.path.append(os.path.abspath('sphinxext')) from github_link import make_linkcode_resolve @@ -41,43 +41,43 @@ # General configuration # ----------------------------------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "4.2.0" +needs_sphinx = '4.2.0' # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named "sphinx.ext.*") or your custom ones. extensions = [ - "myst_parser", - "nipype.sphinxext.apidoc", - "nipype.sphinxext.plot_workflow", - "sphinx.ext.autodoc", - "sphinx.ext.coverage", - "sphinx.ext.doctest", - "sphinx.ext.intersphinx", - "sphinx.ext.linkcode", - "sphinx.ext.mathjax", - "sphinx_markdown_tables", - "sphinxarg.ext", # argparse extension - "sphinxcontrib.apidoc", - "sphinxcontrib.bibtex", + 'myst_parser', + 'nipype.sphinxext.apidoc', + 'nipype.sphinxext.plot_workflow', + 'sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.doctest', + 'sphinx.ext.intersphinx', + 'sphinx.ext.linkcode', + 'sphinx.ext.mathjax', + 'sphinx_markdown_tables', + 'sphinxarg.ext', # argparse extension + 'sphinxcontrib.apidoc', + 'sphinxcontrib.bibtex', ] # Mock modules in autodoc: autodoc_mock_imports = [ - "numpy", - "matplotlib", - "pygraphviz", + 'numpy', + 'matplotlib', + 'pygraphviz', ] -if pver.parse(sphinxversion) >= pver.parse("1.7.0"): +if pver.parse(sphinxversion) >= pver.parse('1.7.0'): autodoc_mock_imports += [ - "pandas", - "nilearn", - "seaborn", - "pygraphviz", + 'pandas', + 'nilearn', + 'seaborn', + 'pygraphviz', ] # Add any paths that contain templates here, relative to this directory. 
-templates_path = ["_templates"] +templates_path = ['_templates'] # ----------------------------------------------------------------------------- # Napoleon settings @@ -102,62 +102,62 @@ # https://github.com/sphinx-contrib/napoleon/pull/10 is merged. napoleon_use_param = False napoleon_custom_sections = [ - ("Inputs", "Parameters"), - ("Outputs", "Parameters"), - ("Attributes", "Parameters"), - ("Mandatory Inputs", "Parameters"), - ("Optional Inputs", "Parameters"), - ("License", "License"), + ('Inputs', 'Parameters'), + ('Outputs', 'Parameters'), + ('Attributes', 'Parameters'), + ('Mandatory Inputs', 'Parameters'), + ('Optional Inputs', 'Parameters'), + ('License', 'License'), ] # ----------------------------------------------------------------------------- # Extension configuration # ----------------------------------------------------------------------------- -apidoc_module_dir = "../aslprep" -apidoc_output_dir = "api" -apidoc_excluded_paths = ["conftest.py", "*/tests/*", "tests/*", "data/*"] +apidoc_module_dir = '../aslprep' +apidoc_output_dir = 'api' +apidoc_excluded_paths = ['conftest.py', '*/tests/*', 'tests/*', 'data/*'] apidoc_separate_modules = True -apidoc_extra_args = ["--module-first", "-d 1", "-T"] +apidoc_extra_args = ['--module-first', '-d 1', '-T'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = [".rst", ".md"] -source_suffix = ".rst" +source_suffix = '.rst' # The encoding of source files. -source_encoding = "utf-8-sig" +source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = "index" +master_doc = 'index' # General information about the project. -project = "aslprep" -author = "ASLPrep developers" -copyright = f"2020-{datetime.now().year}, {author}" +project = 'aslprep' +author = 'ASLPrep developers' +copyright = f'2020-{datetime.now().year}, {author}' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = "version" +version = 'version' # The full version, including alpha/beta/rc tags. -release = "version" +release = 'version' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = "en" +language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The reST default role (used for this markup: `text`) to use for all documents. -default_role = "autolink" +default_role = 'autolink' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True @@ -171,7 +171,7 @@ show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = "default" +pygments_style = 'default' # A list of ignored prefixes for module index sorting. modindex_common_prefix = [] @@ -187,7 +187,7 @@ # ----------------------------------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
-html_theme = "sphinx_rtd_theme" +html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -216,7 +216,7 @@ # relative to this directory. # They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] +html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or .htaccess) here, # relative to this directory. @@ -259,7 +259,7 @@ # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -html_use_opensearch = "" +html_use_opensearch = '' # This is the file name suffix for HTML files (e.g., ".xhtml"). html_file_suffix = None @@ -268,67 +268,67 @@ # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' -html_search_language = "en" +html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. -html_search_options = {"type": "default"} +html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. -html_search_scorer = "" +html_search_scorer = '' # Output file base name for HTML help builder. -htmlhelp_basename = "aslprepdoc" +htmlhelp_basename = 'aslprepdoc' # ----------------------------------------------------------------------------- # Options for manual page output # ----------------------------------------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "aslprep", "aslprep Documentation", [author], 1)] +man_pages = [(master_doc, 'aslprep', 'aslprep Documentation', [author], 1)] # If true, show URL addresses after external links. man_show_urls = False # The following is used by sphinx.ext.linkcode to provide links to github linkcode_resolve = make_linkcode_resolve( - "aslprep", + 'aslprep', ( - "https://github.com/pennlinc/aslprep/blob/" - "{revision}/{package}/{path}#L{lineno}" # noqa: FS003 + 'https://github.com/pennlinc/aslprep/blob/' + '{revision}/{package}/{path}#L{lineno}' # noqa: FS003 ), ) # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { - "python": ("https://docs.python.org/", None), - "numpy": ("http://docs.scipy.org/doc/numpy", None), - "scipy": ("http://docs.scipy.org/doc/scipy/reference", None), - "matplotlib": ("http://matplotlib.sourceforge.net", None), - "bids": ("https://bids-standard.github.io/pybids/", None), - "nibabel": ("https://nipy.org/nibabel/", None), - "nipype": ("https://nipype.readthedocs.io/en/latest/", None), - "niworkflows": ("https://www.nipreps.org/niworkflows/", None), - "sdcflows": ("https://www.nipreps.org/sdcflows/", None), - "smriprep": ("https://www.nipreps.org/smriprep/", None), - "templateflow": ("https://www.templateflow.org/python-client", None), + 'python': ('https://docs.python.org/', None), + 'numpy': ('http://docs.scipy.org/doc/numpy', None), + 'scipy': ('http://docs.scipy.org/doc/scipy/reference', None), + 'matplotlib': ('http://matplotlib.sourceforge.net', None), + 'bids': ('https://bids-standard.github.io/pybids/', None), + 'nibabel': ('https://nipy.org/nibabel/', None), + 'nipype': ('https://nipype.readthedocs.io/en/latest/', None), + 'niworkflows': ('https://www.nipreps.org/niworkflows/', None), + 'sdcflows': ('https://www.nipreps.org/sdcflows/', None), + 'smriprep': ('https://www.nipreps.org/smriprep/', None), + 'templateflow': ('https://www.templateflow.org/python-client', None), } -suppress_warnings = ["image.nonlocal_uri"] +suppress_warnings = ['image.nonlocal_uri'] # ----------------------------------------------------------------------------- # sphinxcontrib-bibtex # ----------------------------------------------------------------------------- -bibtex_bibfiles = ["../aslprep/data/boilerplate.bib"] -bibtex_style = "unsrt" -bibtex_reference_style = "author_year" -bibtex_footbibliography_header = "" +bibtex_bibfiles = ['../aslprep/data/boilerplate.bib'] +bibtex_style = 'unsrt' +bibtex_reference_style = 'author_year' +bibtex_footbibliography_header = '' def setup(app): """Add extra formatting files.""" - app.add_css_file("theme_overrides.css") + app.add_css_file('theme_overrides.css') # We need this for the boilerplate script - app.add_js_file("https://cdn.rawgit.com/chrisfilo/zenodo.js/v0.1/zenodo.js") + app.add_js_file('https://cdn.rawgit.com/chrisfilo/zenodo.js/v0.1/zenodo.js') diff --git a/docs/sphinxext/github_link.py b/docs/sphinxext/github_link.py index 510e82d90..f08babb46 100644 --- a/docs/sphinxext/github_link.py +++ b/docs/sphinxext/github_link.py @@ -9,16 +9,16 @@ from functools import partial from operator import attrgetter -REVISION_CMD = "git rev-parse --short HEAD" +REVISION_CMD = 'git rev-parse --short HEAD' def _get_git_revision(): try: revision = subprocess.check_output(REVISION_CMD.split()).strip() except (subprocess.CalledProcessError, OSError): - print("Failed to execute git to get revision") + print('Failed to execute git to get revision') return None - return revision.decode("utf-8") + return revision.decode('utf-8') def _linkcode_resolve(domain, info, package, url_fmt, revision): @@ -38,17 +38,17 @@ def _linkcode_resolve(domain, info, package, url_fmt, revision): if revision is None: return - if domain not in ("py", "pyx"): + if domain not in ('py', 'pyx'): return - if not info.get("module") or not info.get("fullname"): + if not info.get('module') or not info.get('fullname'): return - class_name = info["fullname"].split(".")[0] + class_name = info['fullname'].split('.')[0] if type(class_name) != str: # Python 2 only - class_name = class_name.encode("utf-8") - module = __import__(info["module"], fromlist=[class_name]) - obj = 
attrgetter(info["fullname"])(module) + class_name = class_name.encode('utf-8') + module = __import__(info['module'], fromlist=[class_name]) + obj = attrgetter(info['fullname'])(module) try: fn = inspect.getsourcefile(obj) @@ -66,7 +66,7 @@ def _linkcode_resolve(domain, info, package, url_fmt, revision): try: lineno = inspect.getsourcelines(obj)[1] except Exception: - lineno = "" + lineno = '' return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno) diff --git a/pyproject.toml b/pyproject.toml index 5637d7ad9..dafcf812e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,6 +64,10 @@ doc = [ "sphinxcontrib-apidoc", "sphinxcontrib-bibtex", ] +dev = [ + "ruff ~= 0.4.3", + "pre-commit", +] test = [ "codecov", "coverage",