From 22a18ba535c47c1a67bf1af55543cfa722c86186 Mon Sep 17 00:00:00 2001
From: dvm-shlee
Date: Tue, 30 Apr 2024 10:59:14 -0400
Subject: [PATCH 01/16] collective code refactoring, updated module and class
 name conventions, and polished the code style

---
 .dockerignore                                 |   4 +-
 .github/pull_request_template.md              |   2 +-
 README.md                                     |   1 -
 brkraw/api/__init__.py                        |   2 +-
 brkraw/api/config/__init__.py                 |   3 +
 brkraw/api/config/config.yaml                 |  24 +++
 brkraw/api/config/manager.py                  | 145 ++++++++++++++
 brkraw/api/data/__init__.py                   |  22 +-
 brkraw/api/data/scan.py                       | 189 ++++++++++++++----
 brkraw/api/data/study.py                      | 125 +++++++++---
 brkraw/api/data/study.yaml                    |  44 ++++
 brkraw/api/helper/recipe.py                   |   5 +-
 brkraw/api/pvobj/__init__.py                  |   4 +-
 brkraw/api/pvobj/{pvdataset.py => pvstudy.py} |  32 ++-
 brkraw/app/tonifti/base.py                    |  56 +++---
 brkraw/app/tonifti/header.py                  |  11 +-
 brkraw/app/tonifti/scan.py                    |  65 ++++--
 brkraw/app/tonifti/study.py                   | 104 +++++++--
 brkraw/config.py                              |  87 --------
 19 files changed, 666 insertions(+), 259 deletions(-)
 create mode 100644 brkraw/api/config/__init__.py
 create mode 100644 brkraw/api/config/config.yaml
 create mode 100644 brkraw/api/config/manager.py
 create mode 100644 brkraw/api/data/study.yaml
 rename brkraw/api/pvobj/{pvdataset.py => pvstudy.py} (87%)
 delete mode 100644 brkraw/config.py

diff --git a/.dockerignore b/.dockerignore
index 769d3c5..2f1e449 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -6,6 +6,7 @@ env
 **/__pycache__
 **/.pytest_cache
+**/.mypy_cache
 
 .idea/**
@@ -16,4 +17,5 @@ env
 tests/*
 paper
 
-.DS_Store
\ No newline at end of file
+.DS_Store
+
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 1511338..6d1f9c5 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -6,4 +6,4 @@
 Changes proposed in this pull request:
 -
 
-@BrkRaw/Bruker
+@BrkRaw/brkraw
diff --git a/README.md b/README.md
index 19a2793..b97cc6f 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,6 @@
 ## BrkRaw: A comprehensive tool to access raw Bruker Biospin MRI data
 
 #### Version: 0.3.11
-
 ### Description
 
 The ‘BrkRaw’ is a python module designed to provide a comprehensive tool to access raw data acquired from
diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py
index 261d4a1..60826d6 100755
--- a/brkraw/api/__init__.py
+++ b/brkraw/api/__init__.py
@@ -1,4 +1,4 @@
 from .data import Study
-from ..config import ConfigManager
+from .config import Manager as ConfigManager
 
 __all__ = ['Study', 'ConfigManager']
\ No newline at end of file
diff --git a/brkraw/api/config/__init__.py b/brkraw/api/config/__init__.py
new file mode 100644
index 0000000..6698a67
--- /dev/null
+++ b/brkraw/api/config/__init__.py
@@ -0,0 +1,3 @@
+from .manager import Manager
+
+__all__ = ['Manager']
\ No newline at end of file
diff --git a/brkraw/api/config/config.yaml b/brkraw/api/config/config.yaml
new file mode 100644
index 0000000..3c90c93
--- /dev/null
+++ b/brkraw/api/config/config.yaml
@@ -0,0 +1,24 @@
+# default configuration for brkraw
+plugin:
+  repo:
+  - https://github.com/brkraw/brkraw-plugin.git/plugin
+  template:
+  - boilerplate
+
+preset:
+  repo:
+  - https://github.com/brkraw/brkraw-plugin.git/preset
+  template:
+  - boilerplate
+
+bids:
+  spec:
+    repo:
+    - https://github.com/brkraw/brkraw-bids.git/spec
+  recipes:
+    repo:
+    - https://github.com/brkraw/brkraw-bids.git/recipes
+
+app:
+  tonifti:
+    output_format: ___
\ No newline at end of file
diff --git a/brkraw/api/config/manager.py b/brkraw/api/config/manager.py
new file mode 100644
index 0000000..f60e150
--- /dev/null
+++ b/brkraw/api/config/manager.py
@@ -0,0 +1,145 @@
+from __future__ import annotations
+import yaml
+from pathlib import Path
+from brkraw import __version__
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from typing import Literal
+
+class Manager:
+    """
+    Manage the configuration settings.
+
+    The configuration works internally without creating any file; a config file is only
+    written out (to the home folder) when the user explicitly creates one via the CLI.
+
+    Notes:
+        - Provides methods to ensure the existence of the config directory, load or create
+          the configuration, set configuration values, and retrieve configuration values.
+    """
+    def __init__(self):
+        """
+        Initialize the configuration object.
+
+        Notes:
+            - Sets up the home, package, local, and global config directory paths.
+        """
+        self.home = Path.home()
+        self.package = Path(__file__).absolute().parent
+        self.local_dir = Path.cwd()
+        self.global_dir = self.home / '.brkraw'
+        self.fname = 'config.yaml'
+
+    def config_file(self, target: Literal['local', 'global'] = 'global'):
+        dir = self.global_dir if target == 'global' else self.local_dir
+        if dir.exists() and (dir / self.fname).exists():
+            return dir / self.fname
+        else:
+            return self.package / self.fname
+
+    def ensure_config_dir_exists(self):
+        """
+        Ensure the existence of the configuration directory.
+
+        Notes:
+            - Creates the global config directory if it does not already exist.
+            - Also creates 'plugin', 'preset', and 'bids' directories within the config directory.
+        """
+        if not self.global_dir.exists():
+            self.global_dir.mkdir()
+            (self.global_dir / 'plugin').mkdir()
+            (self.global_dir / 'preset').mkdir()
+            (self.global_dir / 'bids').mkdir()
+
+    def load(self, target: Literal['local', 'global'] = 'global'):
+        """
+        Load the configuration for the given target.
+
+        Notes:
+            - If no user config file exists, the default configuration shipped with
+              the package is loaded instead.
+        """
+        with open(self.config_file(target)) as f:
+            self.config = yaml.safe_load(f)
+
+    def create(self, target: Literal['local', 'global'] = 'global'):
+        """
+        Create a configuration file at the target location.
+
+        Notes:
+            - Uses the default config if no configuration has been created yet.
+            - For the download location, if no configuration folder (~/.brkraw) has been
+              created, the local folder is used.
+            - The local folder (plugin, preset, bids) where the command is run is checked first.
+        """
+
+    def set(self, key, value):
+        """
+        Set a key-value pair in the configuration and save the updated configuration to the file.
+
+        Args:
+            key: The key to set in the configuration.
+            value: The value to associate with the key.
+
+        Notes:
+            - Updates the configuration with the provided key-value pair.
+            - Persists the updated configuration to the config file.
+        """
+        self.config[key] = value
+        with open(self.config_file(), 'w') as f:
+            yaml.dump(self.config, f, sort_keys=False)
+
+    def get(self, key):
+        """
+        Retrieve the value associated with the given key from the configuration.
+
+        Args:
+            key: The key to retrieve the value for.
+
+        Returns:
+            The value associated with the key in the configuration, or None if the key is not found.
+
+        Notes:
+            - Returns the value corresponding to the provided key from the configuration.
+ """ + return self.config.get(key) + +# def get_scan_time(self, visu_pars=None): +# import datetime as dt +# subject_date = get_value(self._subject, 'SUBJECT_date') +# subject_date = subject_date[0] if isinstance(subject_date, list) else subject_date +# pattern_1 = r'(\d{2}:\d{2}:\d{2})\s+(\d+\s\w+\s\d{4})' +# pattern_2 = r'(\d{4}-\d{2}-\d{2})[T](\d{2}:\d{2}:\d{2})' +# if re.match(pattern_1, subject_date): +# # start time +# start_time = dt.time(*map(int, re.sub(pattern_1, r'\1', subject_date).split(':'))) +# # date +# date = dt.datetime.strptime(re.sub(pattern_1, r'\2', subject_date), '%d %b %Y').date() +# # end time +# if visu_pars != None: +# last_scan_time = get_value(visu_pars, 'VisuAcqDate') +# last_scan_time = dt.time(*map(int, re.sub(pattern_1, r'\1', last_scan_time).split(':'))) +# acq_time = get_value(visu_pars, 'VisuAcqScanTime') / 1000.0 +# time_delta = dt.timedelta(0, acq_time) +# scan_time = (dt.datetime.combine(date, last_scan_time) + time_delta).time() +# return dict(date=date, +# start_time=start_time, +# scan_time=scan_time) +# elif re.match(pattern_2, subject_date): +# # start time +# # subject_date = get_value(self._subject, 'SUBJECT_date')[0] +# start_time = dt.time(*map(int, re.sub(pattern_2, r'\2', subject_date).split(':'))) +# # date +# date = dt.date(*map(int, re.sub(pattern_2, r'\1', subject_date).split('-'))) + +# # end date +# if visu_pars != None: +# scan_time = get_value(visu_pars, 'VisuCreationDate')[0] +# scan_time = dt.time(*map(int, re.sub(pattern_2, r'\2', scan_time).split(':'))) +# return dict(date=date, +# start_time=start_time, +# scan_time=scan_time) +# else: +# raise Exception(ERROR_MESSAGES['NotIntegrated']) + +# return dict(date=date, +# start_time=start_time) \ No newline at end of file diff --git a/brkraw/api/data/__init__.py b/brkraw/api/data/__init__.py index 2b9f2b4..4ba69e0 100644 --- a/brkraw/api/data/__init__.py +++ b/brkraw/api/data/__init__.py @@ -1,4 +1,24 @@ +"""Initializes and exports the main components of the MRI study and scan management package. + +This package module consolidates and provides easy access to the primary classes involved in managing +and analyzing MRI study and scan data. The classes exported here facilitate the interfacing with MRI +data at both the study and scan levels, supporting detailed data manipulation and analysis. + +Exports: + Study: A class that manages MRI study operations, extending functionalities for detailed study data handling. + Scan: A class representing individual MRI scans, capable of detailed scan data analysis and management. + ScanInfo: A class for managing basic information and warnings related to MRI scans. + +The `__init__.py` module ensures that these classes are readily accessible when the package is imported, +making the package easier to use and integrate into larger projects or applications. + +Example: + from your_package_name import Study, Scan, ScanInfo + +This enables straightforward access to these classes for further development and deployment in MRI data analysis tasks. +""" + from .study import Study from .scan import Scan, ScanInfo -__all__ = ['Study', 'Scan', 'ScanInfo'] \ No newline at end of file +__all__ = ['Study', 'Scan', 'ScanInfo'] diff --git a/brkraw/api/data/scan.py b/brkraw/api/data/scan.py index 811be91..1ca05c6 100644 --- a/brkraw/api/data/scan.py +++ b/brkraw/api/data/scan.py @@ -1,92 +1,211 @@ +"""This module provides classes and functions for handling and analyzing photovoltaic objects from MRI scans. 
+
+It is designed to interface with the ParaVision data structures (`PvScan`, `PvReco`, `PvFiles`)
+and perform various analytical tasks to assist in the study of MRI scans.
+
+Classes:
+    ScanInfo: Handles basic scan information and warning accumulation.
+    Scan: Main interface class for working with Pv objects and handling detailed scan analysis,
+        including retrieval of objects from memory and performing affine and data array analysis.
+
+This module is part of the `brkraw` package which aims to provide tools for MRI data manipulation and analysis.
+"""
+
 from __future__ import annotations
-from typing import Optional, Union
 import ctypes
-from ..pvobj import PvScan, PvReco, PvFiles
-from ..pvobj.base import BaseBufferHandler
-from ..analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer, BaseAnalyzer
+from brkraw.api.pvobj import PvScan, PvReco, PvFiles
+from brkraw.api.pvobj.base import BaseBufferHandler
+from brkraw.api.analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer, BaseAnalyzer
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from typing import Optional, Union
+    from .study import Study
 
 
 class ScanInfo(BaseAnalyzer):
-    def __init__(self):
-        self.warns = []
-
+    """Handles the accumulation of warnings and basic information about MRI scans.
+
+    This class is designed to store general scan information and accumulate any warnings that might arise
+    during the scan processing. It serves as a foundational class for more detailed analysis classes
+    that may require access to accumulated warnings and basic scan metrics.
+
+    Attributes:
+        warns (list): A list that accumulates warning messages related to the scan analysis.
+    """
+    def __init__(self) -> None:
+        """Initializes a new instance of ScanInfo with an empty list for warnings."""
+        self.warns: list[str] = []
+
     @property
-    def num_warns(self):
+    def num_warns(self) -> int:
+        """Counts the number of warnings accumulated during the scan processing.
+
+        Returns:
+            int: The total number of warnings accumulated.
+        """
         return len(self.warns)
 
 
 class Scan(BaseBufferHandler):
-    """The Scan class design to interface with analyzer,
+    """Interface class for working with various Pv objects and handling scan information.
 
-    Args:
-        pvobj (_type_): _description_
+    Attributes:
+        pvobj (Union['PvScan', 'PvReco', 'PvFiles']): The ParaVision data object associated with this scan.
+        reco_id (Optional[int]): The reconstruction ID for the scan, defaults to None.
+        study_address (Optional[int]): Memory address of the study object, defaults to None.
+        debug (bool): Flag to enable debug mode, defaults to False.
     """
-    def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id: Optional[int] = None,
-                 study_address: Optional[int] = None, debug: bool=False):
+    def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'],
+                 reco_id: Optional[int] = None,
+                 study_address: Optional[int] = None,
+                 debug: bool = False) -> None:
+        """Initializes the Scan object with necessary identifiers and addresses.
+
+        Args:
+            pvobj: The ParaVision data object to be used throughout the scan analysis.
+            reco_id: Optional reconstruction identifier.
+            study_address: Optional memory address of the associated study object.
+            debug: Flag indicating whether to run in debug mode.
+ """ self.reco_id = reco_id self._study_address = study_address self._pvobj_address = id(pvobj) self.is_debug = debug self.set_scaninfo() - - def retrieve_pvobj(self): + + def retrieve_pvobj(self) -> Union['PvScan', 'PvReco', 'PvFiles', None]: + """Retrieves the pvobj from memory using its stored address. + + Returns: + The pvobj if available; otherwise, None. + """ if self._pvobj_address: - return ctypes.cast(self._pvobj_address, ctypes.py_object).value + return ctypes.cast(self._pvobj_address, + ctypes.py_object).value + return None - def retrieve_study(self): + def retrieve_study(self) -> Optional['Study']: + """Retrieves the study object from memory using its stored address. + + Returns: + The study object if available; otherwise, None. + """ if self._study_address: - return ctypes.cast(self._study_address, ctypes.py_object).value + return ctypes.cast(self._study_address, + ctypes.py_object).value + return None - def set_scaninfo(self, reco_id:Optional[int] = None): + def set_scaninfo(self, reco_id: Optional[int] = None) -> None: + """Sets the scan information based on the reconstruction ID. + + Args: + reco_id: Optional reconstruction ID to specify which scan information to retrieve and set. + """ reco_id = reco_id or self.reco_id self.info = self.get_scaninfo(reco_id) - def get_scaninfo(self, reco_id:Optional[int] = None, get_analyzer:bool = False): + def get_scaninfo(self, + reco_id: Optional[int] = None, + get_analyzer: bool = False) -> Union['ScanInfoAnalyzer', 'ScanInfo']: + """Gets the scan information, optionally using an analyzer to enrich the data. + + Args: + reco_id: Optional reconstruction ID to specify which scan information to retrieve. + get_analyzer: Flag indicating whether to use the ScanInfoAnalyzer for detailed analysis. + + Returns: + An instance of ScanInfo or ScanInfoAnalyzer with the relevant scan details. + """ infoobj = ScanInfo() pvobj = self.retrieve_pvobj() - analysed = ScanInfoAnalyzer(pvobj, reco_id, self.is_debug) + analysed = ScanInfoAnalyzer(pvobj=pvobj, # type: ignore + reco_id=reco_id, + debug=self.is_debug) if get_analyzer: return analysed for attr_name in dir(analysed): if 'info_' in attr_name: attr_vals = getattr(analysed, attr_name) - if warns:= attr_vals.pop('warns', None): + if warns := attr_vals.pop('warns', None): infoobj.warns.extend(warns) setattr(infoobj, attr_name.replace('info_', ''), attr_vals) return infoobj - def get_affine_analyzer(self, reco_id:Optional[int] = None): + def get_affine_analyzer(self, + reco_id: Optional[int] = None) -> 'AffineAnalyzer': + """Retrieves the affine analysis object for the specified reconstruction ID. + + Args: + reco_id: Optional reconstruction ID to specify which affine analysis to retrieve. + + Returns: + An AffineAnalyzer object initialized with the scan information. + """ if reco_id: - info = self.get_scaninfo(reco_id) + info = self.get_scaninfo(reco_id, get_analyzer=False) else: info = self.info if hasattr(self, 'info') else self.get_scaninfo(self.reco_id) - return AffineAnalyzer(info) + return AffineAnalyzer(info) # type: ignore - def get_datarray_analyzer(self, reco_id: Optional[int] = None): + def get_datarray_analyzer(self, + reco_id: Optional[int] = None) -> 'DataArrayAnalyzer': + """Retrieves the data array analyzer for the specified reconstruction ID. + + Args: + reco_id: Optional reconstruction ID to specify which data array analysis to perform. + + Returns: + A DataArrayAnalyzer object initialized with the scan and file information. 
+ """ reco_id = reco_id or self.reco_id pvobj = self.retrieve_pvobj() - fileobj = pvobj.get_2dseq(reco_id=reco_id) + fileobj = pvobj.get_2dseq(reco_id=reco_id) # type: ignore self._buffers.append info = self.info if hasattr(self, 'info') else self.get_scaninfo(reco_id) - return DataArrayAnalyzer(info, fileobj) + return DataArrayAnalyzer(info, fileobj) # type: ignore @property - def avail(self): + def avail(self) -> list[int]: + """List of available reconstruction IDs for the current pvobj. + + Returns: + A list of integers representing the available reconstruction IDs. + """ return self.pvobj.avail @property - def pvobj(self): - return self.retrieve_pvobj() + def pvobj(self) -> Union['PvScan', 'PvReco', 'PvFiles']: + """Retrieves the pvobj from memory. + + Returns: + The current bound pvobj. + """ + return self.retrieve_pvobj() # type: ignore @property - def about_scan(self): + def about_scan(self) -> dict: + """Provides a dictionary with analyzed results for the scan. + + Returns: + A dictionary containing analyzed scan results. + """ return self.info.to_dict() @property - def about_affine(self): + def about_affine(self) -> dict: + """Provides a dictionary with analyzed results for affine transformations. + + Returns: + A dictionary containing analyzed affine results. + """ return self.get_affine_analyzer().to_dict() @property - def about_dataarray(self): - return self.get_datarray_analyzer().to_dict() \ No newline at end of file + def about_dataarray(self) -> dict: + """Provides a dictionary with analyzed results for the data array. + + Returns: + A dictionary containing analyzed data array results. + """ + return self.get_datarray_analyzer().to_dict() diff --git a/brkraw/api/data/study.py b/brkraw/api/data/study.py index aa120c7..1ad6ed2 100644 --- a/brkraw/api/data/study.py +++ b/brkraw/api/data/study.py @@ -1,46 +1,125 @@ +"""This module provides classes and functions for managing and analyzing MRI study data. + +The primary class, Study, extends the functionalities of PvStudy from the brkraw.api.pvobj module +and integrates additional analysis capabilities through the BaseAnalyzer class. It handles the +processing of study-specific data, including the retrieval and management of scan objects, +parsing of study header information, and compiling comprehensive information about studies. + +Classes: + Study: Manages MRI study operations and integrates data processing and analysis capabilities. + It provides methods to retrieve specific scans, parse and access study header data, + and compile detailed information about the study and its associated scans and reconstructions. + +Dependencies: + PvStudy (from brkraw.api.pvobj): + Base class for handling the basic operations related to photovoltaic studies. + BaseAnalyzer (from brkraw.api.analyzer.base): + Base class providing analytical methods used across different types of data analyses. + Scan (from .scan): + Class representing individual scans within a study, providing detailed data access and manipulation. + Recipe (from brkraw.api.helper.recipe): + Utility class used for applying specified recipes to data objects, enabling structured data extraction and analysis. + +This module is utilized in MRI research environments where detailed and structured analysis of photovoltaic data is required. 
+""" + from __future__ import annotations -from ..pvobj import PvDataset +import os +import yaml from .scan import Scan +from brkraw.api.pvobj import PvStudy +from brkraw.api.analyzer.base import BaseAnalyzer +from brkraw.api.helper.recipe import Recipe from pathlib import Path +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Optional + -class Study(PvDataset): - def __init__(self, path: Path): +class Study(PvStudy, BaseAnalyzer): + """Handles operations related to a specific study, integrating PvStudy and analytical capabilities. + + This class extends the functionalities of PvStudy to include detailed analyses + and operations specific to the study being handled. It integrates with various + data processing and analysis methods defined in the base analyzer. + + Attributes: + header (Optional[dict]): Parsed study header information. + """ + def __init__(self, path: Path) -> None: + """Initializes the Study object with a specified path. + + Args: + path (Path): The file system path to the study data. + """ super().__init__(path) self._parse_header() - def get_scan(self, scan_id, reco_id=None, debug=False): - """ - Get a scan object by scan ID. + def get_scan(self, + scan_id: int, + reco_id: Optional[int] = None, + debug: bool = False) -> 'Scan': + """Retrieves a Scan object for a given scan ID with optional reconstruction ID. + + Args: + scan_id (int): The unique identifier for the scan. + reco_id (Optional[int]): The reconstruction identifier, defaults to None. + debug (bool): Flag to enable debugging outputs, defaults to False. + + Returns: + Scan: The Scan object corresponding to the specified scan_id and reco_id. """ pvscan = super().get_scan(scan_id) - return Scan(pvobj=pvscan, reco_id=reco_id, - study_address=id(self), debug=debug) + return Scan(pvobj=pvscan, + reco_id=reco_id, + study_address=id(self), + debug=debug) - def _parse_header(self): + def _parse_header(self) -> None: + """Parses the header information from the study metadata. + + Extracts the header data based on subject and parameters, setting up the + study header attribute. This method handles cases with different versions + of ParaVision by adjusting the header format accordingly. + """ if not self.contents or 'subject' not in self.contents['files']: self.header = None return subj = self.subject subj_header = getattr(subj, 'header') if subj.is_parameter() else None if title := subj_header['TITLE'] if subj_header else None: - self.header = {k.replace("SUBJECT_",""):v for k, v in subj.parameters.items() if k.startswith("SUBJECT")} + self.header = {k.replace("SUBJECT_", ""): v for k, v in subj.parameters.items() if k.startswith("SUBJECT")} self.header['sw_version'] = title.split(',')[-1].strip() if 'ParaVision' in title else "ParaVision < 6" @property - def avail(self): + def avail(self) -> list: + """List of available scan IDs within the study. + + Returns: + list: A list of integers representing the available scan IDs. + """ return super().avail - @property #TODO - def info(self): - """output all analyzed information""" - info = {'header': None, + @property + def info(self) -> dict: + """Compiles comprehensive information about the study, including header details and scans. + + Uses external YAML configuration to drive the synthesis of structured information about the study, + integrating data from various scans and their respective reconstructions. + + Returns: + dict: A dictionary containing structured information about the study, its scans, and reconstructions. 
+ """ + spec_path = os.path.join(os.path.dirname(__file__), 'study.yaml') + with open(spec_path, 'r') as f: + spec = yaml.safe_load(f) + info = {'header': Recipe(self, spec['study']).get(), 'scans': {}} - if header := self.header: - info['header'] = header - # for scan_id in self.avail: - # scanobj = self.get_scan(scan_id) - # info['scans'][scan_id] = {'protocol_name': scanobj.info.protocol['protocol_name'], - # 'recos': {}} - # for reco_id in scanobj.avail: - # info['scans'][scan_id]['recos'][reco_id] = scanobj.get_info(reco_id).frame_group + for scan_id in self.avail: + scanobj = self.get_scan(scan_id) + info['scans'][scan_id] = Recipe(scanobj.info, spec['scan']).get() + info['scans'][scan_id]['recos'] = {} + for reco_id in scanobj.avail: + recoinfo = scanobj.get_scaninfo(reco_id) + info['scans'][scan_id]['recos'][reco_id] = Recipe(recoinfo, spec['reco']).get() return info diff --git a/brkraw/api/data/study.yaml b/brkraw/api/data/study.yaml new file mode 100644 index 0000000..9fc9512 --- /dev/null +++ b/brkraw/api/data/study.yaml @@ -0,0 +1,44 @@ +study: + date: + - header.study_date + - header.date + dob: header.dbirth + id: header.id + name: header.name_string + operator: study_operator + position: + - header.study_instrument_position + - entry: header.entry + position: header.position + script: entry.split("_").pop(-1) + "_" + position.split("_").pop(-1) + sex: + - header.gender + - header.sex + study_name: header.study_name + study_nr: header.study_nr + sw_version: header.sw_version + type: header.type + weight: + - header.study_weight + - header.weight + +scan: + dim: image.dim + in_plane_shape: image.shape + in_plann_resolution: image.resolution + method: protocol.scan_method + num_cycles: cycle.num_cycles + num_slice_packs: slicepack.num_slice_packs + num_slices_each_pack: slicepack.num_slices_each_pack + ppg: protocol.pulse_program + protocol: protocol.protocol_name + slice_distances_each_pack: slicepack.slice_distances_each_pack + slice_order_scheme: slicepack.slice_order_scheme + time_step: cycle.time_step + +reco: + dim_description: + dim_desc: image.dim_desc + fg_desc: frame_group.id + script: dim_desc + [f.split("_")[-1].lower() for f in fg_desc] + type: frame_group.type diff --git a/brkraw/api/helper/recipe.py b/brkraw/api/helper/recipe.py index ae91f5e..92d7282 100644 --- a/brkraw/api/helper/recipe.py +++ b/brkraw/api/helper/recipe.py @@ -69,8 +69,9 @@ def _process_dict(self, dict_obj: Dict): def _process_dict_case_script(self, dict_obj: Dict, script_cmd: List[str]): script = dict_obj.pop(script_cmd) - for s in self.startup_scripts: - exec(s) + if self.startup_scripts: + for s in self.startup_scripts: + exec(s) for key, value in dict_obj.items(): value = self._eval_value(value) if value == None: diff --git a/brkraw/api/pvobj/__init__.py b/brkraw/api/pvobj/__init__.py index 76c26cf..bbb04a0 100755 --- a/brkraw/api/pvobj/__init__.py +++ b/brkraw/api/pvobj/__init__.py @@ -1,7 +1,7 @@ -from .pvdataset import PvDataset +from .pvstudy import PvStudy from .pvscan import PvScan from .pvreco import PvReco from .pvfiles import PvFiles from .parameters import Parameter, Parser -__all__ = ['PvDataset', 'PvScan', 'PvReco', 'PvFiles', 'Parameter', 'Parser'] \ No newline at end of file +__all__ = ['PvStudy', 'PvScan', 'PvReco', 'PvFiles', 'Parameter', 'Parser'] \ No newline at end of file diff --git a/brkraw/api/pvobj/pvdataset.py b/brkraw/api/pvobj/pvstudy.py similarity index 87% rename from brkraw/api/pvobj/pvdataset.py rename to brkraw/api/pvobj/pvstudy.py index 13b3c9e..7a1d717 
100755 --- a/brkraw/api/pvobj/pvdataset.py +++ b/brkraw/api/pvobj/pvstudy.py @@ -6,9 +6,8 @@ from .pvscan import PvScan -class PvDataset(BaseMethods): - """ - A class representing a PvDataset object. +class PvStudy(BaseMethods): + """A class representing a PvStudy object. Inherits from BaseMethods. @@ -24,8 +23,7 @@ class PvDataset(BaseMethods): contents (dict): A dictionary of pvdataset contents. """ def __init__(self, path: Path, debug: bool=False): - """ - Initialize the object with the given path and optional debug flag. + """Initialize the object with the given path and optional debug flag. Args: path: The path to initialize the object with. @@ -36,7 +34,7 @@ def __init__(self, path: Path, debug: bool=False): Any exceptions raised by _check_dataset_validity or _construct methods. Notes: - If 'pvdataset' is present in kwargs, it will be used to initialize the object via super(). + If 'pvstudy' is present in kwargs, it will be used to initialize the object via super(). Examples: obj = ClassName(path='/path/to/dataset', debug=True) @@ -48,11 +46,10 @@ def __init__(self, path: Path, debug: bool=False): # internal method def _check_dataset_validity(self, path: Path): - """ - Checks the validity of a given dataset path. + """Checks the validity of a given dataset path. Note: This method only checks the validity of the dataset to be fetched using `fetch_dir` and `fetch_zip`, - and does not check the validity of a `PvDataset`. + and does not check the validity of a `PvStudy`. Args: path (str): The path to check. @@ -78,8 +75,7 @@ def _check_dataset_validity(self, path: Path): raise ValueError(f"The path '{self._path}' does not meet the required criteria.") def _construct(self): - """ - Constructs the object by organizing the contents. + """Constructs the object by organizing the contents. This method constructs the object by organizing the contents based on the provided directory structure. It iterates over the sorted contents and updates the `_scans` and `_backup` dictionaries accordingly. @@ -106,8 +102,7 @@ def _construct(self): self._clear_contents(to_remove) def _process_childobj(self, matched, item): - """ - The `_process_childobj` method processes a child object based on the provided arguments and updates the internal state of the object. + """The `_process_childobj` method processes a child object based on the provided arguments and updates the internal state of the object. Args: matched: A `re.Match` object representing the matched pattern. @@ -154,8 +149,7 @@ def _clear_contents(self, to_be_removed): @property def path(self): - """ - Gets the path of the object. + """Gets the path of the object. Returns: str: The path of the object. @@ -164,17 +158,15 @@ def path(self): @property def avail(self): - """ - A property representing the available scans. + """A property representing the available scans. Returns: list: A list of available scans. """ return sorted(list(self._scans)) - def get_scan(self, scan_id): - """ - Get a specific scan object by ID. + def get_scan(self, scan_id: int): + """Get a specific scan object by ID. Args: scan_id (int): The ID of the scan object to retrieve. 
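The rename of `PvDataset` to `PvStudy` above, together with the `Study` and `Scan` wrappers introduced earlier in this patch, is easiest to see end to end in a short usage sketch. This is illustrative only: the study path is a placeholder, and the printed values depend on the dataset that is loaded.

```python
from pathlib import Path
from brkraw.api.data import Study  # Study subclasses brkraw.api.pvobj.PvStudy

# Placeholder path; point this at a ParaVision study directory or zip archive.
study = Study(Path('/path/to/paravision_study'))

print(study.avail)        # available scan IDs, e.g. [1, 2, 3]
scan = study.get_scan(1)  # Scan object, linked back to this Study by its memory address
print(scan.avail)         # reconstruction IDs available for this scan
print(scan.about_scan)    # analyzed scan information as a plain dict
```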
diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index 064715c..c9db2a9 100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -2,7 +2,6 @@ import warnings import numpy as np import nibabel as nib -from enum import Enum from pathlib import Path from .header import Header from brkraw.api.pvobj.base import BaseBufferHandler @@ -10,7 +9,7 @@ from brkraw.api.data import Scan from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Optional, Union + from typing import Optional, Union, Literal from brkraw.api.plugin import Plugged @@ -18,18 +17,10 @@ dict(EPI=('mm', 'sec')) -class ScaleMode(Enum): - NONE = 0 - APPLY = 1 - HEADER = 2 - - class BaseMethods(BaseBufferHandler): - def set_scale_mode(self, scale_mode:Optional[ScaleMode]=None): - if scale_mode: - self.scale_mode = scale_mode - else: - self.scale_mode = ScaleMode.HEADER + def set_scale_mode(self, + scale_mode: Optional[Literal['header', 'apply']] = None): + self.scale_mode = scale_mode or 'header' @staticmethod def get_dataobj(scanobj:'Scan', @@ -48,13 +39,15 @@ def get_dataobj(scanobj:'Scan', return dataobj @staticmethod - def get_affine(scanobj:'Scan', reco_id:Optional[int] = None, - subj_type:Optional[str]=None, subj_position:Optional[str]=None): - return BaseMethods.get_affine_dict(scanobj, reco_id, subj_type, subj_position)['affine'] + def get_affine(scanobj:'Scan', reco_id: Optional[int] = None, + subj_type: Optional[str]=None, + subj_position: Optional[str]=None): + return BaseMethods.get_affine_dict(scanobj, reco_id, + subj_type, subj_position)['affine'] @staticmethod - def get_data_dict(scanobj:'Scan', - reco_id:Optional[int] = None): + def get_data_dict(scanobj: 'Scan', + reco_id: Optional[int] = None): datarray_analyzer = scanobj.get_datarray_analyzer(reco_id) axis_labels = datarray_analyzer.shape_desc dataarray = datarray_analyzer.get_dataarray() @@ -70,8 +63,9 @@ def get_data_dict(scanobj:'Scan', } @staticmethod - def get_affine_dict(scanobj:'Scan', reco_id:Optional[int] = None, - subj_type:Optional[str] = None, subj_position:Optional[str] = None): + def get_affine_dict(scanobj: 'Scan', reco_id: Optional[int] = None, + subj_type: Optional[str] = None, + subj_position: Optional[str] = None): affine_analyzer = scanobj.get_affine_analyzer(reco_id) subj_type = subj_type or affine_analyzer.subj_type subj_position = subj_position or affine_analyzer.subj_position @@ -84,25 +78,28 @@ def get_affine_dict(scanobj:'Scan', reco_id:Optional[int] = None, } @staticmethod - def get_nifti1header(scanobj:'Scan', reco_id:Optional[int] = None, - scale_mode:Optional['ScaleMode']=None): + def get_nifti1header(scanobj: 'Scan', reco_id: Optional[int] = None, + scale_mode: Optional[Literal['header', 'apply']] = None): if reco_id: scanobj.set_scaninfo(reco_id) - scale_mode = scale_mode or ScaleMode.HEADER + scale_mode = scale_mode or 'header' return Header(scanobj.info, scale_mode).get() @staticmethod - def get_nifti1image(scanobj:'Scan', reco_id:Optional[int] = None, - scale_mode:Optional['ScaleMode']=None, - subj_type:Optional[str] = None, subj_position:Optional[str] = None, - plugin:Optional['Plugged']=None, plugin_kws:dict=None): + def get_nifti1image(scanobj: 'Scan', + reco_id: Optional[int] = None, + scale_mode: Optional[Literal['header', 'apply']] = None, + subj_type: Optional[str] = None, + subj_position: Optional[str] = None, + plugin: Optional['Plugged'] = None, + plugin_kws: dict = None): if plugin and plugin.type == 'tonifti': with plugin(scanobj, **plugin_kws) as p: dataobj = 
p.get_dataobj(bool(scale_mode)) affine = p.get_affine(subj_type=subj_type, subj_position=subj_position) header = p.get_nifti1header() else: - scale_mode = scale_mode or ScaleMode.HEADER + scale_mode = scale_mode or 'header' dataobj = BaseMethods.get_dataobj(scanobj, reco_id, bool(scale_mode)) affine = BaseMethods.get_affine(scanobj, reco_id, subj_type, subj_position) header = BaseMethods.get_nifti1header(scanobj, reco_id, scale_mode) @@ -110,7 +107,8 @@ def get_nifti1image(scanobj:'Scan', reco_id:Optional[int] = None, class BasePlugin(Scan, BaseMethods): - def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], verbose: bool=False, **kwargs): + def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], + verbose: bool=False, **kwargs): super().__init__(pvobj, **kwargs) self.verbose = verbose diff --git a/brkraw/app/tonifti/header.py b/brkraw/app/tonifti/header.py index 15ee638..378606d 100644 --- a/brkraw/app/tonifti/header.py +++ b/brkraw/app/tonifti/header.py @@ -1,16 +1,17 @@ from __future__ import annotations import warnings from nibabel.nifti1 import Nifti1Header -from typing import TYPE_CHECKING, Union +from typing import TYPE_CHECKING if TYPE_CHECKING: + from typing import Optional, Literal from brkraw.api.data import ScanInfo - from .base import ScaleMode class Header: - def __init__(self, scaninfo:'ScanInfo', scale_mode:Union['ScaleMode', int]): + def __init__(self, scaninfo: 'ScanInfo', + scale_mode: Optional[Literal['header', 'apply']] = None): self.info = scaninfo - self.scale_mode = int(scale_mode.value) + self.scale_mode = 1 if scale_mode == 'header' else 0 self.nifti1header = Nifti1Header() self.nifti1header.default_x_flip = False self._set_scale_params() @@ -49,7 +50,7 @@ def _set_time_step(self): self.nifti1header['slice_duration'] = time_step / num_slices def _set_scale_params(self): - if self.scale_mode == 2: + if self.scale_mode: self.nifti1header['scl_slope'] = self.info.dataarray['slope'] self.nifti1header['scl_inter'] = self.info.dataarray['offset'] diff --git a/brkraw/app/tonifti/scan.py b/brkraw/app/tonifti/scan.py index 989e6d4..96a442f 100644 --- a/brkraw/app/tonifti/scan.py +++ b/brkraw/app/tonifti/scan.py @@ -1,17 +1,20 @@ from __future__ import annotations +from collections import OrderedDict from pathlib import Path from brkraw.api.data import Scan from brkraw.api.pvobj import PvScan, PvReco, PvFiles -from collections import OrderedDict +from .base import BaseMethods from typing import TYPE_CHECKING -from .base import BaseMethods, ScaleMode if TYPE_CHECKING: - from typing import Union, Optional + from typing import Union, Optional, Literal from brkraw.api.plugin import Plugged class ScanToNifti(Scan, BaseMethods): - def __init__(self, *paths: Path, scale_mode: Optional[ScaleMode]=None, **kwargs): + def __init__(self, + *paths: Path, + scale_mode: Optional[Literal['header', 'apply']] = None, + **kwargs): """_summary_ Args: @@ -76,35 +79,57 @@ def _is_pvreco(path: 'Path') -> Union[bool, 'OrderedDict']: return contents return False - def get_affine(self, reco_id:Optional[int]=None, - subj_type:Optional[str]=None, subj_position:Optional[str]=None): - return super().get_affine(scanobj=self, reco_id=reco_id, - subj_type=subj_type, subj_position=subj_position) + def get_affine(self, reco_id: Optional[int] = None, + subj_type: Optional[str] = None, + subj_position: Optional[str] = None): + return super().get_affine(scanobj = self, + reco_id = reco_id, + subj_type = subj_type, + subj_position = subj_position) - def get_dataobj(self, 
reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None): + def get_dataobj(self, reco_id: Optional[int] = None, + scale_mode: Optional[Literal['header', 'apply']] = None): scale_mode = scale_mode or self.scale_mode - scale_correction = False if scale_mode == ScaleMode.HEADER else True + scale_correction = False if not scale_mode or scale_mode == 'header' else True if reco_id: self.set_scaninfo(reco_id) - return super().get_dataobj(scanobj=self, reco_id=reco_id, scale_correction=scale_correction) + return super().get_dataobj(scanobj = self, + reco_id = reco_id, + scale_correction = scale_correction) - def get_data_dict(self, reco_id:Optional[int]=None): + def get_data_dict(self, reco_id: Optional[int] = None): if reco_id: self.set_scaninfo(reco_id) return super().get_data_dict(scanobj=self, reco_id=reco_id) - def get_affine_dict(self, reco_id:Optional[int]=None, subj_type:Optional[str]=None, subj_position:Optional[str]=None): + def get_affine_dict(self, reco_id: Optional[int] = None, + subj_type: Optional[str] = None, + subj_position: Optional[str] = None): if reco_id: self.set_scaninfo(reco_id) - return super().get_affine_dict(scanobj=self, reco_id=reco_id, - subj_type=subj_type, subj_position=subj_position) + return super().get_affine_dict(scanobj = self, + reco_id = reco_id, + subj_type = subj_type, + subj_position = subj_position) - def get_nifti1header(self, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None): + def get_nifti1header(self, + reco_id: Optional[int] = None, + scale_mode: Optional[Literal['header', 'apply']] = None): scale_mode = scale_mode or self.scale_mode return super().get_nifti1header(self, reco_id, scale_mode).get() - def get_nifti1image(self, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None, - subj_type:Optional[str]=None, subj_position:Optional[str]=None, - plugin:Optional['Plugged']=None, plugin_kws:dict=None): + def get_nifti1image(self, + reco_id: Optional[int] = None, + scale_mode: Optional[Literal['header', 'apply']] = None, + subj_type: Optional[str] = None, + subj_position: Optional[str] = None, + plugin: Optional['Plugged'] = None, + plugin_kws: dict = None): scale_mode = scale_mode or self.scale_mode - return super().get_nifti1image(self, reco_id, scale_mode, subj_type, subj_position, plugin, plugin_kws) \ No newline at end of file + return super().get_nifti1image(self, + reco_id, + scale_mode, + subj_type, + subj_position, + plugin, + plugin_kws) \ No newline at end of file diff --git a/brkraw/app/tonifti/study.py b/brkraw/app/tonifti/study.py index 3c037c3..bc87ecc 100644 --- a/brkraw/app/tonifti/study.py +++ b/brkraw/app/tonifti/study.py @@ -1,21 +1,25 @@ +"""Docstring for public module D100, D200.""" from __future__ import annotations from brkraw.api.data import Study -from .base import BaseMethods, ScaleMode +from .base import BaseMethods from .scan import ScanToNifti -from typing import TYPE_CHECKING, Optional - +from typing import TYPE_CHECKING if TYPE_CHECKING: + from typing import Optional, Literal from pathlib import Path from brkraw.api.plugin import Plugged class StudyToNifti(Study, BaseMethods): - def __init__(self, path:'Path', scale_mode: Optional['ScaleMode'] = None): + """public class docstring.""" + def __init__(self, path:'Path', + scale_mode: Optional[Literal['header', 'apply']] = None): super().__init__(path) self.set_scale_mode(scale_mode) self._cache = {} - def get_scan(self, scan_id:int, reco_id:Optional[int] = None): + def get_scan(self, scan_id: int, + reco_id: Optional[int] = None): 
if scan_id not in self._cache.keys(): pvscan = super().get_scan(scan_id).retrieve_pvobj() self._cache[scan_id] = ScanToNifti(pvobj=pvscan, @@ -23,43 +27,81 @@ def get_scan(self, scan_id:int, reco_id:Optional[int] = None): study_address=id(self)) return self._cache[scan_id] - def get_scan_pvobj(self, scan_id:int, reco_id:Optional[int] = None): - return super().get_scan(scan_id).retrieve_pvobj() + def get_scan_pvobj(self, scan_id: int, + reco_id: Optional[int] = None): + return super().get_scan(scan_id=scan_id, + reco_id=reco_id).retrieve_pvobj() - def get_scan_analyzer(self, scan_id:int, reco_id:Optional[int]=None): - return self.get_scan(scan_id).get_scaninfo(reco_id, get_analyzer=True) + def get_scan_analyzer(self, + scan_id: int, + reco_id: Optional[int] = None): + return self.get_scan(scan_id).get_scaninfo(reco_id=reco_id, + get_analyzer=True) - def get_affine(self, scan_id:int, reco_id:Optional[int]=None, - subj_type:Optional[str]=None, subj_position:Optional[str]=None): + def get_affine(self, + scan_id: int, + reco_id: Optional[int] = None, + subj_type: Optional[str] = None, + subj_position: Optional[str] = None): scanobj = self.get_scan(scan_id, reco_id) - return super().get_affine(scanobj=scanobj, reco_id=reco_id, - subj_type=subj_type, subj_position=subj_position) + return super().get_affine(scanobj=scanobj, + reco_id=reco_id, + subj_type=subj_type, + subj_position=subj_position) - def get_dataobj(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None): + def get_dataobj(self, scan_id: int, reco_id: Optional[int] = None, + scale_mode: Optional[Literal['header', 'apply']] = None): scale_mode = scale_mode or self.scale_mode - scale_correction = False if scale_mode == ScaleMode.HEADER else True + scale_correction = False if not scale_mode or scale_mode == 'header' else True scanobj = self.get_scan(scan_id, reco_id) - return super().get_dataobj(scanobj=scanobj, reco_id=reco_id, scale_correction=scale_correction) + return super().get_dataobj(scanobj=scanobj, + reco_id=reco_id, + scale_correction=scale_correction) - def get_data_dict(self, scan_id:int, reco_id:Optional[int]=None): + def get_data_dict(self, scan_id: int, + reco_id: Optional[int] = None): scanobj = self.get_scan(scan_id, reco_id) - return super().get_data_dict(scanobj=scanobj, reco_id=reco_id) + return super().get_data_dict(scanobj=scanobj, + reco_id=reco_id) - def get_affine_dict(self, scan_id:int, reco_id:Optional[int]=None, - subj_type:Optional[str]=None, subj_position:Optional[str]=None): - scanobj = self.get_scan(scan_id, reco_id) - return super().get_affine_dict(scanobj=scanobj, reco_id=reco_id, - subj_type=subj_type, subj_position=subj_position) + def get_affine_dict(self, + scan_id: int, + reco_id: Optional[int] = None, + subj_type: Optional[str] = None, + subj_position: Optional[str] = None): + scanobj = self.get_scan(scan_id=scan_id, + reco_id=reco_id) + return super().get_affine_dict(scanobj=scanobj, + reco_id=reco_id, + subj_type=subj_type, + subj_position=subj_position) - def get_nifti1header(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None): + def get_nifti1header(self, + scan_id: int, + reco_id: Optional[int] = None, + scale_mode: Optional[Literal['header', 'apply']] = None): scale_mode = scale_mode or self.scale_mode - scanobj = self.get_scan(scan_id, reco_id) - return super().get_nifti1header(scanobj, scale_mode).get() + scanobj = self.get_scan(scan_id=scan_id, + reco_id=reco_id) + return super().get_nifti1header(scanobj=scanobj, + 
scale_mode=scale_mode).get() - def get_nifti1image(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None, - subj_type:Optional[str]=None, subj_position:Optional[str]=None, - plugin:Optional['Plugged']=None, plugin_kws:dict=None): + def get_nifti1image(self, + scan_id: int, + reco_id: Optional[int] = None, + scale_mode: Optional[Literal['header', 'apply']] = None, + subj_type: Optional[str] = None, + subj_position: Optional[str] = None, + plugin: Optional['Plugged'] = None, + plugin_kws: dict = None): scale_mode = scale_mode or self.scale_mode - scanobj = self.get_scan(scan_id, reco_id) - return super().get_nifti1image(scanobj, reco_id, scale_mode, subj_type, subj_position, plugin, plugin_kws) + scanobj = self.get_scan(scan_id=scan_id, + reco_id=reco_id) + return super().get_nifti1image(scanobj=scanobj, + reco_id=reco_id, + scale_mode=scale_mode, + subj_type=subj_type, + subj_position=subj_position, + plugin=plugin, + plugin_kws=plugin_kws) \ No newline at end of file diff --git a/brkraw/config.py b/brkraw/config.py deleted file mode 100644 index 545d23e..0000000 --- a/brkraw/config.py +++ /dev/null @@ -1,87 +0,0 @@ -import toml -from pathlib import Path -from brkraw import __version__ - -class ConfigManager: - """ - Manage the configuration settings. - - Notes: - - Provides methods to ensure the existence of the config directory, load or create the configuration, set configuration values, and retrieve configuration values. - """ - def __init__(self): - """ - Initialize the configuration object. - - Notes: - - Sets up the home directory, config directory, and config file paths. - - Ensures the existence of the config directory and loads or creates the configuration. - """ - self.home_dir = Path.home() - self.config_dir = self.home_dir / '.brkraw' - self.config_file = self.config_dir / 'config.toml' - self.ensure_config_dir_exists() - self.load_or_create_config() - - def ensure_config_dir_exists(self): - """ - Ensure the existence of the configuration directory. - - Notes: - - Creates the config directory if it does not already exist. - - Also creates 'plugin' and 'bids' directories within the config directory. - """ - if not self.config_dir.exists(): - self.config_dir.mkdir() - (self.config_dir / 'plugin').mkdir() - (self.config_dir / 'bids').mkdir() - - def load_or_create_config(self): - """ - Load an existing configuration file or create a new one if it does not exist. - - Notes: - - If the config file does not exist, a default configuration is created and saved. - - Otherwise, the existing configuration is loaded from the file. - """ - if not self.config_file.exists(): - default_config = { - 'version': __version__ - } - with open(self.config_file, 'w') as f: - toml.dump(default_config, f) - self.config = default_config - else: - with open(self.config_file, 'r') as f: - self.config = toml.load(f) - - def set(self, key, value): - """ - Set a key-value pair in the configuration and save the updated configuration to the file. - - Args: - key: The key to set in the configuration. - value: The value to associate with the key. - - Notes: - - Updates the configuration with the provided key-value pair. - - Persists the updated configuration to the config file. - """ - self.config[key] = value - with open(self.config_file, 'w') as f: - toml.dump(self.config, f) - - def get(self, key): - """ - Retrieve the value associated with the given key from the configuration. - - Args: - key: The key to retrieve the value for. 
-
-        Returns:
-            The value associated with the key in the configuration, or None if the key is not found.
-
-        Notes:
-            - Returns the value corresponding to the provided key from the configuration.
-        """
-        return self.config.get(key)

From 92865a18966851f39bffd80ec83981d57cc5b401 Mon Sep 17 00:00:00 2001
From: dvm-shlee
Date: Thu, 2 May 2024 11:42:09 -0400
Subject: [PATCH 02/16] feat(snippets): Add new plugin architecture 'snippets'

---
 brkraw/api/config/config.yaml         |  38 +++--
 brkraw/api/config/fetcher/__init__.py |   3 +
 brkraw/api/config/fetcher/base.py     | 101 ++++++++++
 brkraw/api/config/fetcher/snippets.py | 113 +++++++++++++
 brkraw/api/config/manager.py          | 228 ++++++++++++-------------
 brkraw/api/config/snippet/__init__.py |   6 +
 brkraw/api/config/snippet/app.py      |   8 +
 brkraw/api/config/snippet/base.py     |  13 ++
 brkraw/api/config/snippet/bids.py     |   8 +
 brkraw/api/config/snippet/loader.py   |  84 ++++++++++
 brkraw/api/config/snippet/plugin.py   | 229 ++++++++++++++++++++++++++
 brkraw/api/config/snippet/preset.py   |   8 +
 brkraw/api/data/study.py              |  71 ++++++--
 brkraw/api/helper/recipe.py           |  40 +++--
 brkraw/api/plugin/__init__.py         |   5 -
 brkraw/app/tonifti/base.py            |  64 +++++--
 16 files changed, 849 insertions(+), 170 deletions(-)
 create mode 100644 brkraw/api/config/fetcher/__init__.py
 create mode 100644 brkraw/api/config/fetcher/base.py
 create mode 100644 brkraw/api/config/fetcher/snippets.py
 create mode 100644 brkraw/api/config/snippet/__init__.py
 create mode 100644 brkraw/api/config/snippet/app.py
 create mode 100644 brkraw/api/config/snippet/base.py
 create mode 100644 brkraw/api/config/snippet/bids.py
 create mode 100644 brkraw/api/config/snippet/loader.py
 create mode 100644 brkraw/api/config/snippet/plugin.py
 create mode 100644 brkraw/api/config/snippet/preset.py
 delete mode 100644 brkraw/api/plugin/__init__.py

diff --git a/brkraw/api/config/config.yaml b/brkraw/api/config/config.yaml
index 3c90c93..04176d3 100644
--- a/brkraw/api/config/config.yaml
+++ b/brkraw/api/config/config.yaml
@@ -1,24 +1,26 @@
 # default configuration for brkraw
-plugin:
-  repo:
-  - https://github.com/brkraw/brkraw-plugin.git/plugin
-  template:
-  - boilerplate
-
-preset:
+snippets:
   repo:
-  - https://github.com/brkraw/brkraw-plugin.git/preset
-  template:
-  - boilerplate
+  - name: brkraw-snippets
+    url: https://github.com/brkraw/brkraw-snippets.git
+    plugin:
+      path: plugin
+      template:
+      - myplugin
+    preset:
+      path: preset
+      template:
+      - mypreset
+    spec:
+      path: spec
+    recipe:
+      path: recipe
 
-bids:
-  spec:
-    repo:
-    - https://github.com/brkraw/brkraw-bids.git/spec
-  recipes:
-    repo:
-    - https://github.com/brkraw/brkraw-bids.git/recipes
+studyinfo:
+  recipe: default
 
 app:
   tonifti:
-    output_format: ___
\ No newline at end of file
+    output_filename:
+      format: ___
+      filter:
\ No newline at end of file
diff --git a/brkraw/api/config/fetcher/__init__.py b/brkraw/api/config/fetcher/__init__.py
new file mode 100644
index 0000000..b98bc4b
--- /dev/null
+++ b/brkraw/api/config/fetcher/__init__.py
@@ -0,0 +1,3 @@
+from .snippets import Snippets as SnippetsFetcher
+
+__all__ = ['SnippetsFetcher']
\ No newline at end of file
diff --git a/brkraw/api/config/fetcher/base.py b/brkraw/api/config/fetcher/base.py
new file mode 100644
index 0000000..a7e9f0c
--- /dev/null
+++ b/brkraw/api/config/fetcher/base.py
@@ -0,0 +1,101 @@
+from __future__ import annotations
+import re
+import warnings
+import requests
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from typing import List, Tuple
+    from typing import
Optional, Union, Generator + + +class Fetcher: + """Base Fetcher class + + Returns: + _type_: _description_ + + Yields: + _type_: _description_ + """ + _auth: Union[List[Tuple[str, str]], Tuple[str, str]] + repos: dict + + @staticmethod + def is_connected(): + try: + Fetcher._fetch_from_url('https://api.github.com') + except (requests.ConnectTimeout, requests.ConnectionError, requests.RequestException): + return False + return True + + def _set_auth(self): + """Set authentication to access repository""" + if isinstance(self.repos, list): + self._auth = [self._fetch_auth(repo) for repo in self.repos] + + @staticmethod + def _fetch_auth(repo_dict: dict): + if 'auth' in repo_dict: + username = repo_dict['auth']['username'] + token = repo_dict['auth']['token'] + return (username, token) if username and token else None + return None + + @staticmethod + def _walk_github_repo(repo_url: dict, path: Optional['str'] = None, auth: Tuple[str, str] = None): + """Recursively walk through directories in a GitHub repository.""" + base_url = Fetcher._decode_github_repo(repo_url=repo_url, path=path) + return Fetcher._walk_dir(url=base_url, auth=auth) + + @staticmethod + def _walk_dir(url, path='', auth: Tuple[str, str] = None): + contents = Fetcher._fetch_from_url(url=url, auth=auth).json() + dirs, files = Fetcher._fetch_directory_contents(contents) + yield {'path':path, + 'dirs':{d['name']:d['url'] for d in dirs}, + 'files':{f['name']:f['download_url'] for f in files}} + + for dir in dirs: + new_path = f"{path}/{dir['name']}" if path else dir['name'] + new_url = dir['url'] + yield from Fetcher._walk_dir(url=new_url, path=new_path, auth=auth) + + @staticmethod + def _fetch_directory_contents(contents): + dirs, files = [], [] + for i, item in enumerate(contents): + if item['type'] == 'dir': + dirs.append(item) + elif item['type'] == 'file': + files.append(item) + return dirs, files + + @staticmethod + def _decode_github_repo(repo_url: dict, path: Optional['str'] = None): + ptrn_github = r'https://(?:[^/]+\.)?github\.com/(?P[^/]+)/(?P[^/.]+)(?:\.git])?' 
+ if matched := re.match(ptrn_github, repo_url): + owner = matched['owner'] + repo = matched['repo'] + return f"https://api.github.com/repos/{owner}/{repo}/contents/{path}" if path \ + else f"https://api.github.com/repos/{owner}/{repo}/contents" + + @staticmethod + def _fetch_from_url(url: str, auth: Tuple[str, str] = None) -> Optional[requests.Response]: + response = requests.get(url, auth=auth) + if response.status_code == 200: + return response + else: + warnings.warn(f"Failed to retrieve contents: {response.status_code}", UserWarning) + return None + + @staticmethod + def _download_buffer(url: dict, + chunk_size: int = 8192, + auth: Tuple[str, str] = None) -> Union[Generator, bool]: + try: + response = requests.get(url, stream=True, auth=auth) + response.raise_for_status() + return response.iter_content(chunk_size=chunk_size) + except requests.RequestException as e: + warnings.warn(f'Error downloading the file: {e}') + return False diff --git a/brkraw/api/config/fetcher/snippets.py b/brkraw/api/config/fetcher/snippets.py new file mode 100644 index 0000000..17d108c --- /dev/null +++ b/brkraw/api/config/fetcher/snippets.py @@ -0,0 +1,113 @@ +"""Docstring.""" + +from __future__ import annotations +import os +from pathlib import Path +from .base import Fetcher +from brkraw.api.config.snippet import PlugInSnippet +from brkraw.api.config.snippet import BIDSSnippet +from brkraw.api.config.snippet import PresetSnippet +from brkraw.api.config.snippet import AppSnippet +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import List + from typing import Tuple, Optional, Literal, Union + from brkraw.api.config.snippet.base import Snippet + + +class Snippets(Fetcher): + """Class aggregate all available plugins + """ + path: Optional[Path] + mode: Literal['plugin', 'preset', 'bids', 'app'] + is_cache: bool + _template: List = [] + _remote_snippets: List = [] + _local_snippets: List = [] + _template_snippets: List = [] + + def __init__(self, + repos: dict, + mode: Literal['plugin', 'preset', 'bids', 'app'], + path: Tuple[Optional['Path'], 'bool'] = (None, False) + ) -> None: + """_summary_ + + Args: + repos (dict): _description_ + path (Path, optional): _description_. Defaults to None. + cache (bool, optional): _description_. Defaults to False. 
+ """ + self.repos = repos + self.mode = mode + self.path, self.is_cache = path + self._set_auth() + self._fetch_local_contents() + self._template = [c[mode]['template'] for c in repos if 'template' in c[mode]] + + def _fetch_local_contents(self) -> Optional[list]: + """ + """ + if self.is_cache: + return None + if self.mode in ['plugin', 'preset', 'bids']: + contents = [] + for path, dirs, files in os.walk(self.path): + child = {'path':path, + 'dirs':{d:Path(path) / d for d in dirs}, + 'files':{f:Path(path) / f for f in files}} + contents.append(child) + self._convert_contents_to_snippets([contents], remote=False) + + def _is_template(self, repo_id: int, snippet: Snippet) -> bool: + return any(snippet.name == t for t in self._template[repo_id]) + + def _fetch_remote_contents(self) -> None: + """ built-in plugins from build-in dir + """ + if self.repos and self.mode in ['plugin', 'preset', 'bids']: + contents = [self._walk_github_repo(repo_url=repo['url'], + path=repo[self.mode]['path'], + auth=self._auth[i]) for i, repo in enumerate(self.repos)] + self._convert_contents_to_snippets(contents=contents, remote=True) + + def _convert_contents_to_snippets(self, contents: list, remote: bool = False) -> None: + for repo_id, contents in enumerate(contents): + for c in contents: + if remote: + snippet = self._snippet(contents=c, auth=self._auth[repo_id], remote=remote) + self._store_remote_snippet(repo_id=repo_id, snippet=snippet) + else: + snippet = self._snippet(contents=c, remote=remote) + if snippet.is_valid and \ + snippet.name not in [s.name for s in self._local_snippets]: + self._local_snippets.append(snippet) + + def _store_remote_snippet(self, repo_id: int, snippet: Snippet): + if not snippet.is_valid: + return None + if self._is_template(repo_id, snippet) and \ + snippet.name not in [s.name for s in self._template_snippets]: + self._template_snippets.append(snippet) + elif not self._is_template(repo_id, snippet) and \ + snippet.name not in [s.name for s in self._remote_snippets]: + self._remote_snippets.append(snippet) + + @property + def _snippet(self): + if self.mode == 'plugin': + return PlugInSnippet + elif self.mode == 'preset': + return PresetSnippet + elif self.mode == 'bids': + return BIDSSnippet + else: + return AppSnippet + + @property + def remote(self): + return self._remote_snippets + + @property + def local(self): + return self._local_snippets diff --git a/brkraw/api/config/manager.py b/brkraw/api/config/manager.py index f60e150..3c94050 100644 --- a/brkraw/api/config/manager.py +++ b/brkraw/api/config/manager.py @@ -1,145 +1,149 @@ +"""Manager module for configuring, loading, or creating configuration files. + +This module facilitates the management of configuration settings within the application, +allowing configurations to be handled internally without file creation unless specifically +requested by the user through CLI to create them in the home folder. +""" + from __future__ import annotations import yaml +import warnings from pathlib import Path -from brkraw import __version__ +from .fetcher import SnippetsFetcher from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Literal + from typing import Tuple, Literal, Union, Optional + class Manager: - """ - Make this configuration works internally without create file but if user use cli to create, do it so (create on home folder) - Manage the configuration settings. + """Manages the configuration settings for the application. 
-    Notes:
-        - Provides methods to ensure the existence of the config directory, load or create the configuration,
-          set configuration values, and retrieve configuration values.
+    This class ensures the existence of the configuration directory, loads or creates the configuration file,
+    sets configuration values, and retrieves configuration values. It operates both globally and locally
+    depending on the user's choice and the operational context.
     """
-    def __init__(self):
-        """
-        Initialize the configuration object.
+    config: dict = {}
+    
+    def __init__(self, tmpdir: Optional[Path] = None) -> None:
+        """Initializes the configuration manager.
         
-        Notes:
-            - Sets up the home directory, config directory, and config file paths.
-            - Ensures the existence of the config directory and loads or creates the configuration.
-        """
-        self.home = Path.home()
-        self.package = Path(__file__).absolute().parent
-        self.local_dir = Path.cwd()
-        self.global_dir = self.home_dir / '.brkraw'
-        self.fname = 'config.yaml'
-        
-    def config_file(self, target: Literal['local', 'global'] = 'global'):
-        dir = self.global_dir if target == 'global' else self.local_dir
-        if dir.exists() and (dir / self.fname).exists():
-            return dir / self.fname
-        else:
-            return self.package / self.fname
+        This constructor sets up paths for the home directory, global and local configuration directories,
+        and the configuration file. It ensures the configuration directory exists and loads or creates the
+        configuration based on its presence.
 
-    def ensure_config_dir_exists(self):
+        Args:
+            tmpdir (Optional[Path]): Temporary directory for storing configurations, defaults to a '.tmp' folder under the home directory.
         """
-        Ensure the existence of the configuration directory.
+        self._home_dir = Path.home()
+        self._default_dir = Path(__file__).absolute().parent
+        self._local_dir = Path.cwd() / '.brkraw'
+        self._global_dir = self._home_dir / '.brkraw'
+        self._fname = 'config.yaml'
+        self._tmpdir = tmpdir or self._home_dir / '.tmp'
+        self.load()
+    
+    @property
+    def created(self) -> Union[Literal['global', 'local'], list[str], bool]:
+        """Checks and returns the location where the configuration folder was created.
 
-        Notes:
-            - Creates the config directory if it does not already exist.
-            - Also creates 'plugin' and 'bids' directories within the config directory.
+        Returns:
+            Union[Literal['global', 'local'], list[str], bool]: Returns 'global' or 'local' if the config folder was created at that level,
+            a list of locations if multiple exist, or False if no config folder is created.
         """
-        if not self.config_dir.exists():
-            self.config_dir.mkdir()
-            (self.config_dir / 'plugin').mkdir()
-            (self.config_dir / 'preset').mkdir()
-            (self.config_dir / 'bids').mkdir()
+        created = [(f / self._fname).exists() for f in [self._global_dir, self._local_dir]]
+        checked = [loc for i, loc in enumerate(['global', 'local']) if created[i]]
+        checked = checked.pop() if len(checked) == 1 else checked
+        return checked or False
 
-    def load(self, target: Literal['local', 'global'] = 'global'):
-        """
-        Load an existing configuration file or create a new one if it does not exist.
+    @property
+    def config_dir(self) -> 'Path':
+        """Determines and returns the appropriate configuration directory based on the existence and location of the config file.
 
-        Notes:
-            - If the config file does not exist, a default configuration is created and saved.
-            - Otherwise, the existing configuration is loaded from the file.
+        Returns:
+            Path: Path to the configuration directory based on its existence and scope (global or local).
         """
-        if not self.config_file.exists():
-            with open(self.installed_dir / 'config.yalm') as f:
-                self.config = yaml.safe_load(f)
+        if isinstance(self.created, list):
+            return self._local_dir
+        elif isinstance(self.created, str):
+            return self._local_dir if self.created == 'local' else self._global_dir
+        return self._default_dir
+    
+    def load(self) -> None:
+        """Loads the configuration file from the resolved configuration directory, filling the 'config' dictionary with settings."""
+        with open(self.config_dir / self._fname) as f:
+            self.config = yaml.safe_load(f)
 
-    def create(self, target: Literal['local', 'global'] = 'global'):
-        """_summary_
+    def create(self, target: Literal['local', 'global'] = 'local',
+               force: bool = False) -> bool:
+        """Creates a configuration file at the specified location.
+        
+        Args:
+            target (Literal['local', 'global']): Target directory for creating the configuration file, defaults to 'local'.
+            force (bool): If True, overwrites the existing configuration file, defaults to False.
 
         Returns:
-            _type_: _description_
+            bool: True if the file was created successfully, False otherwise.
         """
-        
-        # use default config if no configure created, 
-        # for downloading location, if no configuration folder created (~/.brkraw), use local folder
-        # also check local folder first (plugin, preset, bids), where you run a command
-    def set(self, key, value):
+        if not self.config:
+            self.load()
+        config_dir = self._local_dir if target == 'local' else self._global_dir
+        config_dir.mkdir(exist_ok=True)
+        config_file = config_dir / self._fname
+        if config_file.exists():
+            if not force:
+                warnings.warn("Config file exists; please use the 'force' option if you want to overwrite it.",
+                              UserWarning)
+                return False
+        with open(config_file, 'w') as f:
+            yaml.safe_dump(self.config, f, sort_keys=False)
+        return True
+    
+    def get_fetcher(self, mode: Literal['plugin', 'preset', 'bids', 'app']) -> SnippetsFetcher:
+        """Returns the appropriate fetcher based on the mode.
+
+        Args:
+            mode (Literal['plugin', 'preset', 'bids', 'app']): The mode determining which type of fetcher to return.
+
+        Returns:
+            SnippetsFetcher: An instance of SnippetsFetcher configured for the specified mode.
         """
-        Set a key-value pair in the configuration and save the updated configuration to the file.
+        if mode in ['plugin', 'preset', 'bids']:
+            return self._get_snippet_fetcher(mode)
+        else:
+            return self._get_app_fetcher()
+    
+    def _get_snippet_fetcher(self, mode: Literal['plugin', 'preset', 'bids']) -> 'SnippetsFetcher':
+        """Retrieves a configured SnippetsFetcher for the specified mode to handle fetching of snippets.
 
         Args:
-            key: The key to set in the configuration.
-            value: The value to associate with the key.
+            mode (Literal['plugin', 'preset', 'bids']): The specific category of snippets to fetch.
 
-        Notes:
-            - Updates the configuration with the provided key-value pair.
-            - Persists the updated configuration to the config file.
+        Returns:
+            SnippetsFetcher: A fetcher configured for fetching snippets of the specified type.
         """
-        self.config[key] = value
-        with open(self.config_file, 'w') as f:
-            yaml.dump(self.config, f, sort_keys=False)
+        return SnippetsFetcher(repos=self.config['snippets']['repo'],
+                               mode=mode,
+                               path=self._check_dir(mode))
+    
+    def _get_app_fetcher(self) -> 'SnippetsFetcher':
+        """Retrieves a SnippetsFetcher for application handling.
 
-    def get(self, key):
+        Returns:
+            SnippetsFetcher: A fetcher configured to handle application-specific tasks.
""" - Retrieve the value associated with the given key from the configuration. + return SnippetsFetcher(repos=self.config['app'], + mode='app') + + def _check_dir(self, type_: Literal['plugin', 'preset', 'bids']) -> Tuple['Path', bool]: + """Checks and prepares the directory for the specified snippet type, ensuring it exists. Args: - key: The key to retrieve the value for. + type_ (Literal['plugin', 'preset', 'bids']): The type of snippet for which the directory is checked. Returns: - The value associated with the key in the configuration, or None if the key is not found. - - Notes: - - Returns the value corresponding to the provided key from the configuration. + Tuple[Path, bool]: A tuple containing the path to the directory and a cache flag indicating + if caching is necessary (True if so). """ - return self.config.get(key) - -# def get_scan_time(self, visu_pars=None): -# import datetime as dt -# subject_date = get_value(self._subject, 'SUBJECT_date') -# subject_date = subject_date[0] if isinstance(subject_date, list) else subject_date -# pattern_1 = r'(\d{2}:\d{2}:\d{2})\s+(\d+\s\w+\s\d{4})' -# pattern_2 = r'(\d{4}-\d{2}-\d{2})[T](\d{2}:\d{2}:\d{2})' -# if re.match(pattern_1, subject_date): -# # start time -# start_time = dt.time(*map(int, re.sub(pattern_1, r'\1', subject_date).split(':'))) -# # date -# date = dt.datetime.strptime(re.sub(pattern_1, r'\2', subject_date), '%d %b %Y').date() -# # end time -# if visu_pars != None: -# last_scan_time = get_value(visu_pars, 'VisuAcqDate') -# last_scan_time = dt.time(*map(int, re.sub(pattern_1, r'\1', last_scan_time).split(':'))) -# acq_time = get_value(visu_pars, 'VisuAcqScanTime') / 1000.0 -# time_delta = dt.timedelta(0, acq_time) -# scan_time = (dt.datetime.combine(date, last_scan_time) + time_delta).time() -# return dict(date=date, -# start_time=start_time, -# scan_time=scan_time) -# elif re.match(pattern_2, subject_date): -# # start time -# # subject_date = get_value(self._subject, 'SUBJECT_date')[0] -# start_time = dt.time(*map(int, re.sub(pattern_2, r'\2', subject_date).split(':'))) -# # date -# date = dt.date(*map(int, re.sub(pattern_2, r'\1', subject_date).split('-'))) - -# # end date -# if visu_pars != None: -# scan_time = get_value(visu_pars, 'VisuCreationDate')[0] -# scan_time = dt.time(*map(int, re.sub(pattern_2, r'\2', scan_time).split(':'))) -# return dict(date=date, -# start_time=start_time, -# scan_time=scan_time) -# else: -# raise Exception(ERROR_MESSAGES['NotIntegrated']) - -# return dict(date=date, -# start_time=start_time) \ No newline at end of file + path, cache = (self.config_dir / type_, False) if self.created else (self._tmpdir, True) + if not path.exists(): + path.mkdir() + return path, cache \ No newline at end of file diff --git a/brkraw/api/config/snippet/__init__.py b/brkraw/api/config/snippet/__init__.py new file mode 100644 index 0000000..350fa09 --- /dev/null +++ b/brkraw/api/config/snippet/__init__.py @@ -0,0 +1,6 @@ +from .plugin import PlugIn as PlugInSnippet +from .preset import Preset as PresetSnippet +from .bids import BIDS as BIDSSnippet +from .app import App as AppSnippet + +__all__ = ['PlugInSnippet', 'PresetSnippet', 'BIDSSnippet', 'AppSnippet'] \ No newline at end of file diff --git a/brkraw/api/config/snippet/app.py b/brkraw/api/config/snippet/app.py new file mode 100644 index 0000000..c07126a --- /dev/null +++ b/brkraw/api/config/snippet/app.py @@ -0,0 +1,8 @@ +"""Snippet for App configuration""" + +from .base import Snippet + + +class App(Snippet): + def __init__(self): + raise NotImplementedError 
diff --git a/brkraw/api/config/snippet/base.py b/brkraw/api/config/snippet/base.py
new file mode 100644
index 0000000..0874490
--- /dev/null
+++ b/brkraw/api/config/snippet/base.py
@@ -0,0 +1,13 @@
+"""Base Snippet providing the platform for developing Snippets that configure and/or interface with other apps in the BrkRaw ecosystem.
+The current base is a minimal structure, as only PlugInSnippet is available at the moment; it will be expanded to contain shared
+methods and attributes for Snippet classes.
+"""
+
+from brkraw.api.config.fetcher.base import Fetcher
+
+
+class Snippet(Fetcher):
+    name: str
+    version: str
+    type: str
+    is_valid: bool
diff --git a/brkraw/api/config/snippet/bids.py b/brkraw/api/config/snippet/bids.py
new file mode 100644
index 0000000..f4a79d0
--- /dev/null
+++ b/brkraw/api/config/snippet/bids.py
@@ -0,0 +1,8 @@
+"""Snippet for BIDS converter"""
+
+from .base import Snippet
+
+
+class BIDS(Snippet):
+    def __init__(self):
+        raise NotImplementedError
\ No newline at end of file
diff --git a/brkraw/api/config/snippet/loader.py b/brkraw/api/config/snippet/loader.py
new file mode 100644
index 0000000..b80999b
--- /dev/null
+++ b/brkraw/api/config/snippet/loader.py
@@ -0,0 +1,84 @@
+"""This module implements a ModuleLoader class that allows importing Python modules from either
+a bytes object or a file path.
+
+It is designed to be used within PlugIn Snippets to dynamically load modules without requiring them to be
+pre-installed or located in a standard file system path.
+"""
+
+from __future__ import annotations
+import sys
+import importlib
+from importlib.machinery import ModuleSpec
+from importlib.abc import SourceLoader
+from pathlib import Path
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from typing import Union, Optional
+
+
+class ModuleLoader(SourceLoader):
+    """A custom loader that imports a Python module from a bytes object or from a filepath.
+
+    This loader supports dynamic execution of Python code, which can be especially useful in environments
+    where plugins or modules need to be loaded from non-standard locations or directly from memory.
+
+    Attributes:
+        data (bytes, optional): The bytes object containing the source code of the module.
+        filepath (Path, optional): The file path to the module if it's not loaded from bytes.
+    """
+    def __init__(self, module: Union[Path, bytes]):
+        """Initializes the ModuleLoader with either a path to the module or its bytes content.
+
+        Args:
+            module (Union[Path, bytes]): The source of the module, either as a path or bytes.
+        """
+        if isinstance(module, bytes):
+            self.data, self.filepath = module, None
+        else:
+            self.data, self.filepath = None, module
+
+    def get_data(self, path: Optional[Path]):
+        """Fetches the module's data from bytes or a file.
+
+        Args:
+            path (Path, optional): The path from which to load the module data if it's not already provided as bytes.
+
+        Returns:
+            bytes: The raw data of the module.
+        """
+        if self.data:
+            return self.data
+        elif path and Path(path).is_file():
+            with open(path, 'rb') as file:
+                return file.read()
+        else:
+            raise FileNotFoundError(f"No such file: {path}")
+
+    def get_filename(self, fullname: Optional[str] = None):
+        """Retrieves the filename of the module being loaded.
+
+        Args:
+            fullname (str, optional): The full name of the module.
+
+        Returns:
+            str: The filepath if it's defined, otherwise an empty string for byte-loaded modules.
+        """
+        return str(self.filepath) if self.filepath else ""
+
+    def get_module(self, name: str):
+        """Creates and returns a module object from the provided data.
+
+        This method constructs a module using the spec provided by this loader, executes it,
+        and registers it in sys.modules.
+
+        Args:
+            name (str): The name of the module.
+
+        Returns:
+            ModuleType: The imported module, loaded and ready for use.
+        """
+        spec = ModuleSpec(name=name, loader=self, origin=self.get_filename())
+        module = importlib.util.module_from_spec(spec)
+        self.exec_module(module)
+        sys.modules[name] = module
+        return module
+        
\ No newline at end of file
diff --git a/brkraw/api/config/snippet/plugin.py b/brkraw/api/config/snippet/plugin.py
new file mode 100644
index 0000000..3d41f82
--- /dev/null
+++ b/brkraw/api/config/snippet/plugin.py
@@ -0,0 +1,229 @@
+"""Provides a PlugInSnippet class that allows plugin source code, or code loaded in memory,
+to be imported as a Python module. This extends the functionality of the brkraw module at the
+application level.
+
+This facilitates quick testing of code without needing to set up an environment for plugin downloads.
+
+Changes:
+    2024.5.1: Initial design and implementation of the PlugIn Snippet architecture. Initially tested for the tonifti app.
+    TODO: The PlugIn module will become the standard method to extend functionality across all apps.
+
+Author: Sung-Ho Lee (shlee@unc.edu)
+"""
+
+from __future__ import annotations
+import sys
+import re
+import yaml
+import warnings
+import subprocess as subproc
+from pathlib import Path
+from tqdm import tqdm
+from .base import Snippet
+from .loader import ModuleLoader
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from typing import Tuple, Dict, Optional, Union
+
+
+class PlugIn(Snippet):
+    """Handles the inspection and management of plugins, either locally or from remote sources.
+
+    This class supports dynamic loading of plugins into memory for immediate use without the need for disk storage,
+    facilitating rapid development and testing of plugin functionalities.
+    """
+    _remote: bool
+    _module_loaded: bool
+    _dependencies_tested: bool = False
+    _auth: Tuple[str, str]
+    _data: Dict = {}
+    _contents: Dict
+
+    def __init__(self, 
+                 contents: dict,
+                 auth: Optional[Tuple[str, str]] = None, 
+                 remote: bool = False):
+        """Initializes the plugin with specified contents, authentication for remote access, and remote status.
+
+        Args:
+            contents (dict): Contains keys of path, dirs, and files, similar to os.walk but structured as a dictionary.
+                Each directory and file is also mapped as a key (filename) to a value (path or download_url).
+            auth (Tuple[str, str], optional): Credentials for using the GitHub API if needed.
+            remote (bool): True if the plugin is loaded remotely, False otherwise.
+        """
+        self._auth = auth
+        self._contents = contents
+        self._remote = remote
+        self._content_parser()
+
+    def set(self, skip_dependency_check: bool = False, *args, **kwargs):
+        """Sets the plugin's parameters and ensures dependencies are resolved and the module is loaded.
+
+        This method acts as a setup routine: it tests dependencies, downloads necessary files,
+        dynamically imports the module, and then calls the module with the given input arguments.
+
+        Args:
+            skip_dependency_check (bool): If True, skips the dependency resolution step. Defaults to False.
+            *args: Variable length argument list.
+            **kwargs: Arbitrary keyword arguments.
+
+        Returns:
+            The result of calling the imported module with provided arguments.
+ """ + if not self._module_loaded: + self.download() + if not self._dependencies_tested and not skip_dependency_check: + self.resolve_dependencies() + return self._imported_module(*args, **kwargs) + + def resolve_dependencies(self): + """Checks and installs any missing dependencies specified in the plugin's manifest file.""" + ptrn = r'(\w+)\s*(>=|<=|==|!=|>|<)\s*([0-9]+(?:\.[0-9]+)*)?' + deps = self._manifest['dependencies'] + print(f"++ Resolving python module dependencies...\n -> {deps}") + for module in tqdm(deps, desc=' -Dependencies', ncols=80): + if matched := re.match(ptrn, module): + self._status = None + self._pip_install(matched) + self._dependencies_tested = True + + def _pip_install(self, matched): + """Executes the pip install command for the matched dependency. + + Args: + matched (re.Match): A match object containing the module and version information. + + This method handles the pip installation process, directing output and errors appropriately. + """ + m, r, v = matched.groups() + cmd = [sys.executable, "-m", "pip", "install", f"{m}{r or ''}{v or ''}"] + displayed = 0 + with subproc.Popen(cmd, stdout=subproc.PIPE, stderr=subproc.PIPE, + text=True, bufsize=1, universal_newlines=True) as proc: + for l in proc.stdout: + if 'satisfied' in l.lower(): + if not displayed: + print(f" + Required already satisfied: {m}") + displayed += 1 + elif 'collecting' in l.lower(): + if not displayed: + print(f" + Installing '{m}' to resolve dependencies.") + displayed += 1 + proc.wait() + if proc.returncode != 0: + warnings.warn(f"'Errors during resolving dependencies': {''.join(proc.stderr)}") + + def download(self, dest: Optional[Path] = None, force: bool = False): + """Downloads the plugin to a specified destination or loads it directly into memory if no destination is provided. + This method also checks if the file already exists at the destination and optionally overwrites it based on the 'force' parameter. + + Args: + dest (Path, optional): The file system destination where the plugin files will be saved. + If None, files are loaded into memory. + force (bool, optional): If True, existing files at the destination will be overwritten. + Defaults to False. + """ + if not self._remote: + warnings.warn("Attempt to download failed: The plugin is already available " + "locally and cannot be downloaded again.", UserWarning) + return False + print(f"\n++ Downloading remote module to '{dest or 'memory'}'.") + files = self._contents['files'] if dest else self._get_module_files() + for filename, download_url in tqdm(files.items(), desc=' -Files', ncols=80): + if dest: + plugin_path = (dest / self.name) + plugin_path.mkdir(exist_ok=True) + plugin_file = plugin_path / filename + if plugin_file.exists() and not force: + warnings.warn(f"Warning: File '{filename}' already exists. Skipping download. Use 'force=True' to overwrite.", + UserWarning) + continue # Skip the download if file exists and force is False + with open(plugin_file, 'wb') as f: + for chunk in self._download_buffer(download_url, auth=self._auth): + f.write(chunk) + else: + # When downloading to memory + self._data[filename] = b''.join(self._download_buffer(download_url, auth=self._auth)) + self._module_loaded = True # Mark the module as loaded + + + def _get_module_files(self): + return {f:url for f, url in self._contents['files'].items() if f.endswith('.py')} + + def _content_parser(self): + """Parses the contents of the plugin based on its current state (local or remote). 
+
+        This method sets the plugin's parameters and determines its validity based on the availability
+        and correctness of the required data.
+        """
+        if len(self._contents['files']) == 0:
+            self.is_valid = False
+            return None
+        self._parse_files()
+        try:
+            self._set_params()
+        except KeyError:
+            self.is_valid = False
+            return None
+
+    def _set_params(self):
+        self.name = self._manifest['name']
+        self.version = self._manifest['version']
+        self.type = self._manifest['subtype']
+        self.is_valid = True
+        self._module_loaded = False if self._remote else True
+
+    def _parse_files(self):
+        """Processes the contents, loading the manifest if necessary."""
+        for filename, file_loc in self._contents['files'].items():
+            if filename.lower() == 'manifest.yaml':
+                self._load_manifest(file_loc)
+
+    def _parse_remote(self):
+        """Processes the contents if the plugin is in a remote state, loading the manifest if necessary."""
+        for filename, download_url in self._contents['files'].items():
+            if filename.lower() == 'manifest.yaml':
+                self._load_manifest(download_url)
+
+    def _load_manifest(self, file_loc: Union[str, Path]):
+        """Loads the plugin's manifest from a remote URL or a local file.
+
+        Args:
+            file_loc (Union[str, Path]): The download URL, or the file path, of the manifest.
+
+        This method fetches and parses the plugin's manifest file, setting flags based on the contents.
+        """
+        if self._remote:
+            bytes_data = b''.join(self._download_buffer(file_loc, auth=self._auth))
+            self._manifest = yaml.safe_load(bytes_data)
+        else:
+            with open(file_loc, 'r') as f:
+                self._manifest = yaml.safe_load(f)
+        if self._manifest['type'] != 'plugin':
+            warnings.warn(f"The type annotation of the '{self._manifest['name']}' plugin manifest is not set to 'plugin'. "
+                          "This may cause the plugin to function incorrectly.")
+            self.is_valid = False
+
+    @property
+    def _imported_module(self):
+        """Dynamically imports the module from loaded data.
+
+        This method uses the information from the manifest to import the specified module and method dynamically.
+
+        Returns:
+            The imported method from the module.
+ """ + source = self._manifest['source'] + f, c = source.split(':') + mloc = self._data[f] if self._remote else self._contents['files'][f] + loader = ModuleLoader(mloc) + module = loader.get_module(self.name) + return getattr(module, c) + + def __repr__(self): + if self.is_valid: + repr = f"PlugInSnippet<{self.type}>::{self.name}[{self.version}]" + if self._remote: + repr += '+InMemory' if self._module_loaded else '+Remote' + return repr + else: + return "PlugInSnippet::InValidPlugin" diff --git a/brkraw/api/config/snippet/preset.py b/brkraw/api/config/snippet/preset.py new file mode 100644 index 0000000..48474ad --- /dev/null +++ b/brkraw/api/config/snippet/preset.py @@ -0,0 +1,8 @@ +"""Snippet for Preset""" + +from .base import Snippet + + +class Preset(Snippet): + def __init__(self): + raise NotImplementedError \ No newline at end of file diff --git a/brkraw/api/data/study.py b/brkraw/api/data/study.py index 1ad6ed2..d3d6fc9 100644 --- a/brkraw/api/data/study.py +++ b/brkraw/api/data/study.py @@ -26,16 +26,38 @@ from __future__ import annotations import os import yaml +import warnings +from copy import copy +from pathlib import Path +from dataclasses import dataclass from .scan import Scan from brkraw.api.pvobj import PvStudy from brkraw.api.analyzer.base import BaseAnalyzer from brkraw.api.helper.recipe import Recipe -from pathlib import Path from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Optional +@dataclass +class StudyHeader: + header: dict + scans: list + + +@dataclass +class ScanHeader: + scan_id: int + header: dict + recos: list + + +@dataclass +class RecoHeader: + reco_id: int + header: dict + + class Study(PvStudy, BaseAnalyzer): """Handles operations related to a specific study, integrating PvStudy and analytical capabilities. @@ -46,6 +68,8 @@ class Study(PvStudy, BaseAnalyzer): Attributes: header (Optional[dict]): Parsed study header information. """ + _info: StudyHeader + def __init__(self, path: Path) -> None: """Initializes the Study object with a specified path. @@ -102,6 +126,26 @@ def avail(self) -> list: @property def info(self) -> dict: + if hasattr(self, '_info'): + return self._stream_info() + else: + self._process_header() + return self._stream_info() + + def _stream_info(self): + stream = self._info.__dict__ + scans = {} + for s in self._info.scans: + scans[s.scan_id] = s.header + recos = {} + for r in s.recos: + recos[r.reco_id] = r.header + if recos: + scans[s.scan_id]['recos'] = recos + stream['scans'] = scans + return stream + + def _process_header(self): """Compiles comprehensive information about the study, including header details and scans. 
Uses external YAML configuration to drive the synthesis of structured information about the study, @@ -113,13 +157,18 @@ def info(self) -> dict: spec_path = os.path.join(os.path.dirname(__file__), 'study.yaml') with open(spec_path, 'r') as f: spec = yaml.safe_load(f) - info = {'header': Recipe(self, spec['study']).get(), - 'scans': {}} - for scan_id in self.avail: - scanobj = self.get_scan(scan_id) - info['scans'][scan_id] = Recipe(scanobj.info, spec['scan']).get() - info['scans'][scan_id]['recos'] = {} - for reco_id in scanobj.avail: - recoinfo = scanobj.get_scaninfo(reco_id) - info['scans'][scan_id]['recos'][reco_id] = Recipe(recoinfo, spec['reco']).get() - return info + self._info = StudyHeader(header=Recipe(self, copy(spec)['study']).get(), scans=[]) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + for scan_id in self.avail: + scanobj = self.get_scan(scan_id) + scan_spec = copy(spec)['scan'] + scan_header = ScanHeader(scan_id=scan_id, header=Recipe(scanobj.info, scan_spec).get(), recos=[]) + for reco_id in scanobj.avail: + recoinfo = scanobj.get_scaninfo(reco_id) + reco_spec = copy(spec)['reco'] + reco_header = Recipe(recoinfo, reco_spec).get() + reco_header = RecoHeader(reco_id=reco_id, header=reco_header) if reco_header else None + if reco_header: + scan_header.recos.append(reco_header) + self._info.scans.append(scan_header) diff --git a/brkraw/api/helper/recipe.py b/brkraw/api/helper/recipe.py index 92d7282..fdba6c1 100644 --- a/brkraw/api/helper/recipe.py +++ b/brkraw/api/helper/recipe.py @@ -8,19 +8,25 @@ from typing import Optional, Dict, List, Any from brkraw.api.analyzer import BaseAnalyzer + class Recipe(BaseHelper): def __init__(self, target: 'BaseAnalyzer', recipe: dict, legacy: bool = False, - startup_scripts:Optional[List[str]] = None): + startup_scripts: Optional[List[str]] = None): self.target = target self.recipe = recipe self.results = OrderedDict() self.backward_comp = legacy - self.startup_scripts = startup_scripts + self.startup_scripts = startup_scripts or [] self._parse_recipe() def _parse_recipe(self): for key, value in self.recipe.items(): - self.results[key] = self._eval_value(value) + if key == 'startup': + scripts = [s for s in value if s is not None] + self.startup_scripts.extend(scripts) + else: + if value := self._eval_value(value): + self.results[key] = value def _eval_value(self, value: Any): if isinstance(value, str): @@ -43,8 +49,11 @@ def _process_str(self, str_obj: str): return self._legacy_parser(str_obj) ptrn = r'(?P^[a-zA-Z][a-zA-Z0-9_]*)\.(?P[a-zA-Z][a-zA-Z0-9_]*)' if matched := re.match(ptrn, str_obj): - attr = getattr(self.target, matched['attr']) - return attr.get(matched['key'], None) + if hasattr(self.target, matched['attr']): + attr = getattr(self.target, matched['attr']) + return attr.get(matched['key'], None) + else: + return None else: return str_obj @@ -64,20 +73,25 @@ def _process_dict(self, dict_obj: Dict): else: processed = {} for key, value in dict_obj.items(): - processed[key] = self._eval_value(value) - return processed + if value := self._eval_value(value): + processed[key] = value + return processed if len(processed) else None def _process_dict_case_script(self, dict_obj: Dict, script_cmd: List[str]): - script = dict_obj.pop(script_cmd) + script = dict_obj[script_cmd] if self.startup_scripts: for s in self.startup_scripts: exec(s) for key, value in dict_obj.items(): - value = self._eval_value(value) - if value == None: - return None - exec(f'global {key}') - exec(f'{key} = {value}') + if key != script_cmd: + 
value = self._eval_value(value) + if value == None: + return None + exec(f'global {key}') + try: + exec(f'{key} = {value}') + except NameError: + exec(f"{key} = '{value}'") exec(f"output = {script}", globals(), locals()) return locals()['output'] diff --git a/brkraw/api/plugin/__init__.py b/brkraw/api/plugin/__init__.py deleted file mode 100644 index a43766b..0000000 --- a/brkraw/api/plugin/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .aggregator import Aggregator -from .plugged import Plugged -from .preset import Preset - -__all__ = ['Aggregator', 'Plugged', 'Preset'] \ No newline at end of file diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index c9db2a9..5d1a19f 100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -1,16 +1,16 @@ from __future__ import annotations import warnings import numpy as np -import nibabel as nib from pathlib import Path +from nibabel.nifti1 import Nifti1Image from .header import Header from brkraw.api.pvobj.base import BaseBufferHandler from brkraw.api.pvobj import PvScan, PvReco, PvFiles from brkraw.api.data import Scan from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Optional, Union, Literal - from brkraw.api.plugin import Plugged + from typing import List, Optional, Union, Literal + from brkraw.api.config.snippet import PlugInSnippet XYZT_UNITS = \ @@ -91,32 +91,74 @@ def get_nifti1image(scanobj: 'Scan', scale_mode: Optional[Literal['header', 'apply']] = None, subj_type: Optional[str] = None, subj_position: Optional[str] = None, - plugin: Optional['Plugged'] = None, - plugin_kws: dict = None): + plugin: Optional['PlugInSnippet'] = None, + plugin_kws: Optional[dict] = None) -> Union['Nifti1Image', List['Nifti1Image']]: + scale_correction = 1 if scale_mode == 'apply' else 0 if plugin and plugin.type == 'tonifti': - with plugin(scanobj, **plugin_kws) as p: - dataobj = p.get_dataobj(bool(scale_mode)) + with plugin.set(scanobj, **plugin_kws) as p: + dataobj = p.get_dataobj(scale_correction=scale_correction) affine = p.get_affine(subj_type=subj_type, subj_position=subj_position) header = p.get_nifti1header() else: scale_mode = scale_mode or 'header' - dataobj = BaseMethods.get_dataobj(scanobj, reco_id, bool(scale_mode)) - affine = BaseMethods.get_affine(scanobj, reco_id, subj_type, subj_position) - header = BaseMethods.get_nifti1header(scanobj, reco_id, scale_mode) - return nib.Nifti1Image(dataobj, affine, header) + dataobj = BaseMethods.get_dataobj(scanobj=scanobj, + reco_id=reco_id, + scale_correction=scale_correction) + affine = BaseMethods.get_affine(scanobj=scanobj, + reco_id=reco_id, + subj_type=subj_type, + subj_position=subj_position) + header = BaseMethods.get_nifti1header(scanobj=scanobj, + reco_id=reco_id, + scale_mode=scale_mode) + + if isinstance(dataobj, list): + # multi-dataobj (e.g. msme) + affine = affine if isinstance(affine, list) else [affine for _ in range(len(dataobj))] + return [Nifti1Image(dataobj=dobj, affine=affine[i], header=header) for i, dobj in enumerate(dataobj)] + if isinstance(affine, list): + # multi-slicepacks + return [Nifti1Image(dataobj[:,:,i,...], affine=aff, header=header) for i, aff in enumerate(affine)] + return Nifti1Image(dataobj=dataobj, affine=affine, header=header) class BasePlugin(Scan, BaseMethods): + """Base class for handling plugin operations, integrating scanning and basic method functionalities. + + This class initializes plugin operations with options for verbose output and integrates functionalities + from the Scan and BaseMethods classes. 
It provides methods to close the plugin and clear any cached data. + + Args: + pvobj (Union['PvScan', 'PvReco', 'PvFiles']): An object representing the PV (ParaVision) scan, reconstruction, + or file data, which is central to initializing the plugin operations. + verbose (bool): Flag to enable verbose output during operations, defaults to False. + **kwargs: Additional keyword arguments that are passed to the superclass. + + Attributes: + verbose (bool): Enables or disables verbose output. + """ def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], verbose: bool=False, **kwargs): + """Initializes the BasePlugin with a PV object, optional verbosity, and other parameters. + + Args: + pvobj (Union['PvScan', 'PvReco', 'PvFiles']): The primary object associated with ParaVision operations. + verbose (bool, optional): If True, enables verbose output. Defaults to False. + **kwargs: Arbitrary keyword arguments passed to the superclass initialization. + """ super().__init__(pvobj, **kwargs) self.verbose = verbose def close(self): + """Closes the plugin and clears any associated caches by invoking the clear_cache method. + """ super().close() self.clear_cache() def clear_cache(self): + """Clears all cached data associated with the plugin. This involves deleting files that have been + cached during plugin operations. + """ for buffer in self._buffers: file_path = Path(buffer.name) if file_path.exists(): From f5326881257f8c41d298a3c9da1fc217bd02024e Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 2 May 2024 13:20:21 -0400 Subject: [PATCH 03/16] fix(tonifti): bug fix on plugin integration - missing config initiation at project root - several minor tonifti app related bug fix included - version upgrade to 0.4.0 upon new feature --- brkraw/__init__.py | 4 +++- brkraw/api/__init__.py | 3 ++- brkraw/app/tonifti/base.py | 43 +++++++++++++++++++++++++++++------- brkraw/app/tonifti/header.py | 4 ++++ brkraw/app/tonifti/scan.py | 9 ++++---- brkraw/app/tonifti/study.py | 8 +++---- pyproject.toml | 20 +++++++++-------- 7 files changed, 63 insertions(+), 28 deletions(-) diff --git a/brkraw/__init__.py b/brkraw/__init__.py index 130a9c2..82a46a7 100644 --- a/brkraw/__init__.py +++ b/brkraw/__init__.py @@ -1,6 +1,8 @@ from .lib import * +from .api import ConfigManager -__version__ = '0.3.11' +config = ConfigManager() +__version__ = '0.4.00' __all__ = ['BrukerLoader', '__version__', 'config'] diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py index 60826d6..2c9217a 100755 --- a/brkraw/api/__init__.py +++ b/brkraw/api/__init__.py @@ -1,4 +1,5 @@ from .data import Study from .config import Manager as ConfigManager +from .config.snippet.plugin import PlugIn as PlugInSnippet -__all__ = ['Study', 'ConfigManager'] \ No newline at end of file +__all__ = ['Study', 'ConfigManager', 'PlugInSnippet'] \ No newline at end of file diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index 5d1a19f..162ac97 100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -4,13 +4,15 @@ from pathlib import Path from nibabel.nifti1 import Nifti1Image from .header import Header +from brkraw import config from brkraw.api.pvobj.base import BaseBufferHandler from brkraw.api.pvobj import PvScan, PvReco, PvFiles from brkraw.api.data import Scan +from brkraw.api.config.snippet import PlugInSnippet from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import List, Optional, Union, Literal - from brkraw.api.config.snippet import PlugInSnippet + from brkraw.api.config.manager import 
Manager as ConfigManager
 
 
 XYZT_UNITS = \
@@ -18,6 +20,8 @@
 
 
 class BaseMethods(BaseBufferHandler):
+    config: ConfigManager = config
+    
     def set_scale_mode(self, 
                        scale_mode: Optional[Literal['header', 'apply']] = None):
         self.scale_mode = scale_mode or 'header'
@@ -91,16 +95,39 @@ def get_nifti1image(scanobj: 'Scan',
                         scale_mode: Optional[Literal['header', 'apply']] = None,
                         subj_type: Optional[str] = None,
                         subj_position: Optional[str] = None,
-                        plugin: Optional['PlugInSnippet'] = None,
+                        plugin: Optional[Union['PlugInSnippet', str]] = None,
                         plugin_kws: Optional[dict] = None) -> Union['Nifti1Image', List['Nifti1Image']]:
-        scale_correction = 1 if scale_mode == 'apply' else 0
-        if plugin and plugin.type == 'tonifti':
-            with plugin.set(scanobj, **plugin_kws) as p:
-                dataobj = p.get_dataobj(scale_correction=scale_correction)
-                affine = p.get_affine(subj_type=subj_type, subj_position=subj_position)
-                header = p.get_nifti1header()
+        if plugin:
+            # initialize upfront so the availability check below cannot hit an
+            # unbound local when a PlugInSnippet instance is passed in directly
+            not_available = False
+            fetcher = config.get_fetcher('plugin')
+            if isinstance(plugin, str):
+                # check plugin available on local
+                if fetcher.is_cache:
+                    # No plugin downloaded, check on remote
+                    if available := [p for p in fetcher.remote if p.name == plugin]:
+                        plugin = available.pop()
+                    else:
+                        not_available = True
+                else:
+                    if available := [p for p in fetcher.local if p.name == plugin]:
+                        plugin = available.pop()
+                    else:
+                        not_available = True
+            if isinstance(plugin, PlugInSnippet) and plugin.type == 'tonifti':
+                with plugin.set(pvobj=scanobj.pvobj, **plugin_kws) as p:
+                    dataobj = p.get_dataobj()
+                    affine = p.get_affine(subj_type=subj_type, subj_position=subj_position)
+                    header = p.get_nifti1header()
+            else:
+                not_available = True
+            if not_available:
+                warnings.warn("Failed: the given plugin is not available. Please install it locally, or use one of the "
                              f"plugins available on the remote repository. -> {[p.name for p in fetcher.remote]}",
+                              UserWarning)
+                return None
         else:
             scale_mode = scale_mode or 'header'
+            scale_correction = 1 if scale_mode == 'apply' else 0
             dataobj = BaseMethods.get_dataobj(scanobj=scanobj,
                                               reco_id=reco_id,
                                               scale_correction=scale_correction)
diff --git a/brkraw/app/tonifti/header.py b/brkraw/app/tonifti/header.py
index 378606d..7acd992 100644
--- a/brkraw/app/tonifti/header.py
+++ b/brkraw/app/tonifti/header.py
@@ -1,3 +1,7 @@
+"""This module creates NIfTI1 headers.
+It is currently not functioning as expected and needs further work.
+"""
+
 from __future__ import annotations
 import warnings
 from nibabel.nifti1 import Nifti1Header
diff --git a/brkraw/app/tonifti/scan.py b/brkraw/app/tonifti/scan.py
index 96a442f..f4d71aa 100644
--- a/brkraw/app/tonifti/scan.py
+++ b/brkraw/app/tonifti/scan.py
@@ -7,7 +7,7 @@
 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
     from typing import Union, Optional, Literal
-    from brkraw.api.plugin import Plugged
+    from brkraw.api import PlugInSnippet
 
 
 class ScanToNifti(Scan, BaseMethods):
@@ -25,16 +25,15 @@ def __init__(self, 
         if len(paths) == 0:
             super().__init__(**kwargs)
         else:
-            
             if len(paths) == 1 and paths[0].is_dir():
                 abspath = paths[0].absolute()
                 if contents := self._is_pvscan(abspath):
                     pvobj = self._construct_pvscan(abspath, contents)
                 elif contents := self._is_pvreco(abspath):
                     pvobj = self._construct_pvreco(abspath, contents)
             else:
                 pvobj = PvFiles(*paths)
-            # self.scanobj = Scan(pvobj=pvobj, reco_id=pvobj._reco_id)
             super().__init__(pvobj=pvobj, reco_id=pvobj._reco_id)
@@ -116,14 +115,14 @@ def get_nifti1header(self, 
                          reco_id: Optional[int] = None, 
                          scale_mode: Optional[Literal['header', 'apply']] = None):
         scale_mode = scale_mode or self.scale_mode
-        return super().get_nifti1header(self, reco_id, scale_mode).get()
+        return super().get_nifti1header(self, reco_id, scale_mode)
 
     def get_nifti1image(self, 
                         reco_id: Optional[int] = None, 
                         scale_mode: Optional[Literal['header', 'apply']] = None,
                         subj_type: Optional[str] = None, 
                         subj_position: Optional[str] = None,
-                        plugin: Optional['Plugged'] = None, 
+                        plugin: Optional[Union['PlugInSnippet', str]] = None, 
                         plugin_kws: dict = None):
         scale_mode = scale_mode or self.scale_mode
         return super().get_nifti1image(self, 
diff --git a/brkraw/app/tonifti/study.py b/brkraw/app/tonifti/study.py
index bc87ecc..38e0b4c 100644
--- a/brkraw/app/tonifti/study.py
+++ b/brkraw/app/tonifti/study.py
@@ -5,9 +5,9 @@
 from .scan import ScanToNifti
 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
-    from typing import Optional, Literal
+    from typing import Optional, Literal, Union
     from pathlib import Path
-    from brkraw.api.plugin import Plugged
+    from brkraw.api import PlugInSnippet
 
 
 class StudyToNifti(Study, BaseMethods):
@@ -84,7 +84,7 @@ def get_nifti1header(self, 
         scanobj = self.get_scan(scan_id=scan_id, 
                                 reco_id=reco_id)
         return super().get_nifti1header(scanobj=scanobj,
-                                        scale_mode=scale_mode).get()
+                                        scale_mode=scale_mode)
 
     def get_nifti1image(self, 
                         scan_id: int, 
@@ -92,7 +92,7 @@ def get_nifti1image(self, 
                         scale_mode: Optional[Literal['header', 'apply']] = None,
                         subj_type: Optional[str] = None, 
                         subj_position: Optional[str] = None,
-                        plugin: Optional['Plugged'] = None, 
+                        plugin: Optional[Union['PlugInSnippet', str]] = None, 
                         plugin_kws: dict = None):
         scale_mode = scale_mode or self.scale_mode
         scanobj = self.get_scan(scan_id=scan_id, 
diff --git a/pyproject.toml b/pyproject.toml
index b56781d..a6de40d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -10,14 +10,11 @@ classifiers = [
     'Natural Language :: English',
 ]
dependencies = [ + 'pyyaml>=6.0.1', 'nibabel>=3.0.2', 'numpy>=1.18.0', - 'pandas>=1.0.0', - 'pillow>=7.1.1', 'tqdm>=4.45.0', - 'openpyxl>=3.0.3', - 'xlrd>=1.0.0', - 'toml>=0.10.2' + 'pillow>=7.1.1', ] description = "Bruker PvDataset Loader" license = {text = "GNLv3"} @@ -29,16 +26,21 @@ requires-python = ">=3.7" keywords = ['bruker', 'data_handler', 'converter', 'administrator_tool'] [project.urls] -Homepage = "https://github.com/brkraw/brkraw" +Homepage = "https://brkraw.github.io" [project.optional-dependencies] -SimpleITK = [ - 'SimpleITK>=1.2.4' +legacy = [ + 'pandas>=1.0.0', + 'openpyxl>=3.0.3', + 'xlrd>=1.0.0', + 'SimpleITK>=1.2.4', ] + dev = [ "flake8", "pytest", - "nbmake" + "nbmake", + "types-PyYAML" ] [tool.hatch.version] From 7e3ba480b29628292afd092817c7871b9a8eb46b Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 2 May 2024 21:29:52 -0400 Subject: [PATCH 04/16] feat(fetcher): Enhance remote accessibility and add initial docstrings - Enable SnippetsFetcher to automatically search for remote content upon invocation. - Add the first version of docstrings throughout the module to improve code documentation. --- brkraw/api/config/fetcher/__init__.py | 11 +++ brkraw/api/config/fetcher/base.py | 106 +++++++++++++++++++++--- brkraw/api/config/fetcher/snippets.py | 113 +++++++++++++++++++++----- 3 files changed, 199 insertions(+), 31 deletions(-) diff --git a/brkraw/api/config/fetcher/__init__.py b/brkraw/api/config/fetcher/__init__.py index b98bc4b..eac06c5 100644 --- a/brkraw/api/config/fetcher/__init__.py +++ b/brkraw/api/config/fetcher/__init__.py @@ -1,3 +1,14 @@ +"""Initialization for the fetcher module. + +This module consolidates various fetching functionalities and exposes the Snippets class +for fetching and managing snippets from local and remote sources. + +Exposes: + SnippetsFetcher: A class derived from the Snippets module, tailored to handle the fetching, + storage, and synchronization of code snippets or configurations from + designated sources. +""" + from .snippets import Snippets as SnippetsFetcher __all__ = ['SnippetsFetcher'] \ No newline at end of file diff --git a/brkraw/api/config/fetcher/base.py b/brkraw/api/config/fetcher/base.py index a7e9f0c..9fdfd27 100644 --- a/brkraw/api/config/fetcher/base.py +++ b/brkraw/api/config/fetcher/base.py @@ -1,27 +1,47 @@ +"""Provides a base Fetcher class for accessing and manipulating content from remote repositories. + +This module is designed to facilitate the retrieval of repository data, specifically from GitHub, +by providing methods to authenticate, fetch, and traverse directories. It integrates direct +API requests to handle repository contents and provides utility functions for downloading files +and walking through repository directories recursively. + +Classes: + Fetcher: A base class for fetching content from remote repositories with GitHub API integration. +""" + from __future__ import annotations import re import warnings import requests +from brkraw.api.util.package import PathResolver from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import List, Tuple - from typing import Optional, Union, Generator + from typing import Optional, Union + from typing import List, Tuple, Generator -class Fetcher: - """Base Fetcher class +class Fetcher(PathResolver): + """Base class for fetching remote content with methods to authenticate and navigate repositories. 
- Returns: - _type_: _description_ + The Fetcher class extends the functionality of PathResolver to include methods that handle + the authentication and retrieval of data from remote GitHub repositories. It provides + utilities to walk through repository directories, fetch file and directory contents, + and download files as needed. - Yields: - _type_: _description_ + Attributes: + _auth (Union[List[Tuple[str, str]], Tuple[str, str]]): Authentication credentials for the repository. + repos (dict): Configuration for the repositories to be accessed. """ _auth: Union[List[Tuple[str, str]], Tuple[str, str]] repos: dict @staticmethod def is_connected(): + """Check if there is an internet connection available by pinging a known URL. + + Returns: + bool: True if the connection is successful, False otherwise. + """ try: Fetcher._fetch_from_url('https://api.github.com') except (requests.ConnectTimeout, requests.ConnectionError, requests.RequestException): @@ -29,12 +49,23 @@ def is_connected(): return True def _set_auth(self): - """Set authentication to access repository""" + """Set up authentication credentials for accessing configured repositories. + + Extracts and sets authentication details for each repository from the provided configurations. + """ if isinstance(self.repos, list): self._auth = [self._fetch_auth(repo) for repo in self.repos] @staticmethod def _fetch_auth(repo_dict: dict): + """Fetch authentication credentials from a repository configuration. + + Args: + repo_dict (dict): Repository configuration containing 'auth' fields. + + Returns: + Optional[Tuple[str, str]]: A tuple containing username and token if both are present, otherwise None. + """ if 'auth' in repo_dict: username = repo_dict['auth']['username'] token = repo_dict['auth']['token'] @@ -43,12 +74,31 @@ def _fetch_auth(repo_dict: dict): @staticmethod def _walk_github_repo(repo_url: dict, path: Optional['str'] = None, auth: Tuple[str, str] = None): - """Recursively walk through directories in a GitHub repository.""" + """Recursively walk through directories in a GitHub repository to fetch directory and file structure. + + Args: + repo_url (dict): URL of the GitHub repository. + path (Optional[str]): Specific path in the repository to start the walk. + auth (Tuple[str, str]): Authentication credentials for accessing the repository. + + Yields: + dict: A dictionary containing 'path', 'dirs', and 'files' with their respective URLs. + """ base_url = Fetcher._decode_github_repo(repo_url=repo_url, path=path) return Fetcher._walk_dir(url=base_url, auth=auth) @staticmethod def _walk_dir(url, path='', auth: Tuple[str, str] = None): + """Walk through a specific directory in a repository. + + Args: + url (str): URL of the directory to walk through. + path (str): Path relative to the repository root. + auth (Tuple[str, str]): Authentication credentials for accessing the repository. + + Yields: + dict: A dictionary containing the path, directories, and files within the directory. + """ contents = Fetcher._fetch_from_url(url=url, auth=auth).json() dirs, files = Fetcher._fetch_directory_contents(contents) yield {'path':path, @@ -62,6 +112,14 @@ def _walk_dir(url, path='', auth: Tuple[str, str] = None): @staticmethod def _fetch_directory_contents(contents): + """Categorize contents of a directory into subdirectories and files. + + Args: + contents (list): List of contents from a directory. + + Returns: + tuple: A tuple containing lists of directories and files. 
+ """ dirs, files = [], [] for i, item in enumerate(contents): if item['type'] == 'dir': @@ -72,6 +130,15 @@ def _fetch_directory_contents(contents): @staticmethod def _decode_github_repo(repo_url: dict, path: Optional['str'] = None): + """Decode a GitHub repository URL to construct an API endpoint URL. + + Args: + repo_url (dict): The GitHub repository URL. + path (Optional[str]): An optional path within the repository. + + Returns: + str: A constructed API endpoint URL based on the repository details. + """ ptrn_github = r'https://(?:[^/]+\.)?github\.com/(?P[^/]+)/(?P[^/.]+)(?:\.git])?' if matched := re.match(ptrn_github, repo_url): owner = matched['owner'] @@ -81,6 +148,15 @@ def _decode_github_repo(repo_url: dict, path: Optional['str'] = None): @staticmethod def _fetch_from_url(url: str, auth: Tuple[str, str] = None) -> Optional[requests.Response]: + """Fetch data from a given URL using optional authentication. + + Args: + url (str): The URL from which to fetch data. + auth (Tuple[str, str]): Optional authentication credentials. + + Returns: + Optional[requests.Response]: The response object if successful, otherwise None. + """ response = requests.get(url, auth=auth) if response.status_code == 200: return response @@ -92,6 +168,16 @@ def _fetch_from_url(url: str, auth: Tuple[str, str] = None) -> Optional[requests def _download_buffer(url: dict, chunk_size: int = 8192, auth: Tuple[str, str] = None) -> Union[Generator, bool]: + """Download file content from a URL in buffered chunks. + + Args: + url (dict): The URL of the file to download. + chunk_size (int): The size of each chunk in bytes. + auth (Tuple[str, str]): Optional authentication credentials. + + Returns: + Union[Generator, bool]: A generator yielding file chunks if successful, False on error. + """ try: response = requests.get(url, stream=True, auth=auth) response.raise_for_status() diff --git a/brkraw/api/config/fetcher/snippets.py b/brkraw/api/config/fetcher/snippets.py index 17d108c..754b9b6 100644 --- a/brkraw/api/config/fetcher/snippets.py +++ b/brkraw/api/config/fetcher/snippets.py @@ -1,7 +1,18 @@ -"""Docstring.""" +"""Provides functionality to manage and synchronize snippets across local and remote sources. + +This module defines a `Snippets` class which aggregates snippets from various sources, +handles their synchronization, and ensures that the snippets are up-to-date according to +user-specified modes (plugin, preset, bids, app). It supports operations on snippets +fetched from both local file systems and remote repositories, offering features to check +connectivity, fetch content, and validate snippet integrity. + +Classes: + Snippets: Manages the aggregation and synchronization of snippets based on specified modes. +""" from __future__ import annotations import os +import warnings from pathlib import Path from .base import Fetcher from brkraw.api.config.snippet import PlugInSnippet @@ -11,67 +22,88 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import List - from typing import Tuple, Optional, Literal, Union + from typing import Tuple, Optional, Literal from brkraw.api.config.snippet.base import Snippet class Snippets(Fetcher): - """Class aggregate all available plugins + """Manages the aggregation of snippets from various sources based on the specified mode. + + This class integrates local and remote snippet sources, handling their fetching, storing, + and updating based on connectivity and cache settings. 
""" path: Optional[Path] mode: Literal['plugin', 'preset', 'bids', 'app'] is_cache: bool - _template: List = [] - _remote_snippets: List = [] - _local_snippets: List = [] - _template_snippets: List = [] + _fetched: bool = False + _template: List[Snippet] = [] + _remote_snippets: List[Snippet] = [] + _local_snippets: List = [Snippet] + _template_snippets: List = [Snippet] def __init__(self, repos: dict, mode: Literal['plugin', 'preset', 'bids', 'app'], path: Tuple[Optional['Path'], 'bool'] = (None, False) ) -> None: - """_summary_ + """Initializes the Snippets object with specified repository configurations and operational mode. Args: - repos (dict): _description_ - path (Path, optional): _description_. Defaults to None. - cache (bool, optional): _description_. Defaults to False. + repos (dict): A dictionary containing repository configurations. + mode (Literal['plugin', 'preset', 'bids', 'app']): The operational mode determining the type of snippets to manage. + path (Tuple[Optional[Path], bool], optional): A tuple containing the path to local storage and a boolean indicating cache usage. """ self.repos = repos self.mode = mode - self.path, self.is_cache = path + self.path = self._resolve(path[0]) + self.is_cache = path[1] self._set_auth() self._fetch_local_contents() self._template = [c[mode]['template'] for c in repos if 'template' in c[mode]] def _fetch_local_contents(self) -> Optional[list]: - """ + """Fetches snippets from local storage based on the current mode and path settings. + + Gathers contents from the specified directory and converts them into snippets. This operation + is skipped if caching is enabled. + + Returns: + Optional[list]: Returns None if caching is enabled, otherwise returns a list of fetched local contents. """ if self.is_cache: return None if self.mode in ['plugin', 'preset', 'bids']: contents = [] for path, dirs, files in os.walk(self.path): - child = {'path':path, - 'dirs':{d:Path(path) / d for d in dirs}, - 'files':{f:Path(path) / f for f in files}} + child = {'path':self._resolve(path), + 'dirs':{d:self._resolve(path) / d for d in dirs}, + 'files':{f:self._resolve(path) / f for f in files}} contents.append(child) self._convert_contents_to_snippets([contents], remote=False) - def _is_template(self, repo_id: int, snippet: Snippet) -> bool: - return any(snippet.name == t for t in self._template[repo_id]) - def _fetch_remote_contents(self) -> None: - """ built-in plugins from build-in dir + """Fetches snippets from remote repositories if connected and not previously fetched. + + Retrieves snippet data from remote sources as specified by the repository configuration + and converts them into snippet objects. Updates the fetched status upon completion. """ if self.repos and self.mode in ['plugin', 'preset', 'bids']: contents = [self._walk_github_repo(repo_url=repo['url'], path=repo[self.mode]['path'], auth=self._auth[i]) for i, repo in enumerate(self.repos)] self._convert_contents_to_snippets(contents=contents, remote=True) + self._fetched = True def _convert_contents_to_snippets(self, contents: list, remote: bool = False) -> None: + """Converts fetched contents from either local or remote sources into snippet objects. + + Iterates over fetched contents, creating snippet objects which are then stored appropriately + based on their validation status and whether they match predefined templates. + + Args: + contents (list): List of contents fetched from either local or remote sources. 
+ remote (bool, optional): Flag indicating whether the contents are from remote sources. + """ for repo_id, contents in enumerate(contents): for c in contents: if remote: @@ -84,6 +116,15 @@ def _convert_contents_to_snippets(self, contents: list, remote: bool = False) -> self._local_snippets.append(snippet) def _store_remote_snippet(self, repo_id: int, snippet: Snippet): + """Stores validated remote snippets into the appropriate lists based on template matching. + + Checks if the snippet is valid and if it matches a template or not. Based on this, + the snippet is added to the respective list (template snippets or general remote snippets). + + Args: + repo_id (int): The repository ID corresponding to the snippet source. + snippet (Snippet): The snippet object to be stored. + """ if not snippet.is_valid: return None if self._is_template(repo_id, snippet) and \ @@ -95,6 +136,11 @@ def _store_remote_snippet(self, repo_id: int, snippet: Snippet): @property def _snippet(self): + """Determines the snippet class based on the operational mode. + + Returns: + Type[Snippet]: Returns the class type corresponding to the operational mode (Plugin, Preset, BIDS, App). + """ if self.mode == 'plugin': return PlugInSnippet elif self.mode == 'preset': @@ -106,8 +152,33 @@ def _snippet(self): @property def remote(self): - return self._remote_snippets + """Access the remote snippets if available. Fetches the snippets from a remote source if not already fetched + and if a network connection is available. + + Returns: + Any: The remote snippets if available and connected, otherwise None. + + Raises: + Warning: If the connection to fetch remote snippets fails. + """ + if self._remote_snippets: + return self._remote_snippets + else: + if self.is_connected(): + self._fetch_remote_contents() + return self._remote_snippets + else: + warnings.warn("Connection failed. Please check your network settings.") + return None + + def _is_template(self, repo_id: int, snippet: Snippet) -> bool: + """Test given snippet is template. This internal method used to exclude template snippets from avail.""" + return any(snippet.name == t for t in self._template[repo_id]) @property def local(self): return self._local_snippets + + @property + def is_up_to_date(self): + return self._fetched \ No newline at end of file From 9bf0a0ef7e89ee2cc4723d7e2b9431cbfc6ee135 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 2 May 2024 21:50:12 -0400 Subject: [PATCH 05/16] docs(analyzer): Update and expand docstrings for clarity - Revise and enhance docstrings throughout the module to improve code documentation and readability. - Add detailed descriptions for class methods and properties, focusing on parameters and behaviors. --- brkraw/api/analyzer/__init__.py | 13 +++++++++++ brkraw/api/analyzer/affine.py | 39 ++++++++++++++++++++++++++++++++ brkraw/api/analyzer/base.py | 20 ++++++++++++++++ brkraw/api/analyzer/dataarray.py | 36 +++++++++++++++++++++++++++-- brkraw/api/analyzer/scaninfo.py | 33 +++++++++++++++++++++++---- 5 files changed, 134 insertions(+), 7 deletions(-) diff --git a/brkraw/api/analyzer/__init__.py b/brkraw/api/analyzer/__init__.py index dce03d3..9bc47d6 100644 --- a/brkraw/api/analyzer/__init__.py +++ b/brkraw/api/analyzer/__init__.py @@ -1,3 +1,16 @@ +"""Analyzer module initialization. + +This module imports and exposes various analyzer classes used to parse and process +information from raw datasets into more readable formats. 
Each analyzer provides +specific functionalities tailored to different aspects of data processing and analysis. + +Exposed Classes: + BaseAnalyzer: Provides common features and utilities shared among all analyzers. + ScanInfoAnalyzer: Specializes in parsing and analyzing scan information from raw datasets. + AffineAnalyzer: Handles the computation and analysis of affine matrices from dataset parameters. + DataArrayAnalyzer: Focuses on parsing and returning structured data arrays and related metadata. +""" + from .base import BaseAnalyzer from .scaninfo import ScanInfoAnalyzer from .affine import AffineAnalyzer diff --git a/brkraw/api/analyzer/affine.py b/brkraw/api/analyzer/affine.py index aa1a677..c6af743 100644 --- a/brkraw/api/analyzer/affine.py +++ b/brkraw/api/analyzer/affine.py @@ -1,3 +1,11 @@ +"""Affine Matrix Analyzer Module. + +This module focuses on analyzing and processing affine matrices derived from imaging data. +It provides functionalities to calculate, adjust, and standardize affine transformations based +on specific imaging parameters and subject orientations, thereby facilitating accurate spatial +orientation and alignment of imaging data. +""" + from __future__ import annotations from brkraw.api import helper from .base import BaseAnalyzer @@ -22,7 +30,24 @@ class AffineAnalyzer(BaseAnalyzer): + """Processes affine matrices from raw dataset parameters to ensure proper spatial orientation. + + This analyzer calculates affine matrices based on imaging data and subject configurations. + It supports various adjustments based on subject type and pose, ensuring the matrices are + suitable for specific analysis and visualization requirements. + + Args: + infoobj (ScanInfo): The information object containing imaging parameters and subject orientation. + + Attributes: + resolution (list[tuple]): Resolution details extracted from imaging data. + affine (np.ndarray or list[np.ndarray]): The calculated affine matrices. + subj_type (str): The type of the subject (e.g., Biped, Quadruped). + subj_position (str): The position of the subject during the scan. + """ def __init__(self, infoobj: 'ScanInfo'): + """Initialize the AffineAnalyzer with an information object. + """ infoobj = copy(infoobj) if infoobj.image['dim'] == 2: xr, yr = infoobj.image['resolution'] @@ -43,6 +68,8 @@ def __init__(self, infoobj: 'ScanInfo'): self.subj_position = infoobj.orientation['subject_position'] if hasattr(infoobj, 'orientation') else None def get_affine(self, subj_type: Optional[str] = None, subj_position: Optional[str] = None): + """Retrieve the affine matrix, applying corrections based on subject type and position. + """ subj_type = subj_type or self.subj_type subj_position = subj_position or self.subj_position if isinstance(self.affine, list): @@ -52,6 +79,8 @@ def get_affine(self, subj_type: Optional[str] = None, subj_position: Optional[st return affine def _calculate_affine(self, infoobj: 'ScanInfo', slicepack_id: Optional[int] = None): + """Calculate the initial affine matrix based on the imaging data and subject orientation. + """ sidx = infoobj.orientation['orientation_desc'][slicepack_id].index(2) \ if slicepack_id else infoobj.orientation['orientation_desc'].index(2) slice_orient = SLICEORIENT[sidx] @@ -69,12 +98,16 @@ def _calculate_affine(self, infoobj: 'ScanInfo', slicepack_id: Optional[int] = N @staticmethod def _correct_origin(orientation, volume_origin, slice_distance): + """Adjust the origin of the volume based on slice orientation and distance. 
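+
+        A minimal doctest-style sketch (illustrative only; an identity orientation and
+        hypothetical values are assumed):
+
+            >>> import numpy as np
+            >>> AffineAnalyzer._correct_origin(np.eye(3), np.array([1., 2., 3.]), 0.5)
+            array([1. , 2. , 3.5])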
+ """ new_origin = orientation.dot(volume_origin) new_origin[-1] += slice_distance return orientation.T.dot(new_origin) @staticmethod def _compose_affine(resolution, orientation, volume_origin, slice_orient): + """Compose the affine transformation matrix using the provided resolution, orientation, and origin. + """ resol = np.array(resolution) if slice_orient in ['axial', 'sagital']: resol = np.diag(resol) @@ -86,6 +119,8 @@ def _compose_affine(resolution, orientation, volume_origin, slice_orient): @staticmethod def _est_rotate_angle(subj_pose): + """Estimate the rotation angle needed based on the subject's pose. + """ rotate_angle = {'rad_x':0, 'rad_y':0, 'rad_z':0} if subj_pose: if subj_pose == 'Head_Supine': @@ -112,6 +147,8 @@ def _est_rotate_angle(subj_pose): @classmethod def _correct_orientation(cls, affine, subj_pose, subj_type): + """Correct the orientation of the affine matrix based on the subject's type and pose. + """ cls._inspect_subj_info(subj_pose, subj_type) rotate_angle = cls._est_rotate_angle(subj_pose) affine = helper.rotate_affine(affine, **rotate_angle) @@ -122,6 +159,8 @@ def _correct_orientation(cls, affine, subj_pose, subj_type): @staticmethod def _inspect_subj_info(subj_pose, subj_type): + """Validate subject type and pose information. + """ if subj_pose: part, side = subj_pose.split('_') assert part in SUBJPOSE['part'], 'Invalid subject position' diff --git a/brkraw/api/analyzer/base.py b/brkraw/api/analyzer/base.py index 76fa42d..b2894b6 100644 --- a/brkraw/api/analyzer/base.py +++ b/brkraw/api/analyzer/base.py @@ -1,3 +1,23 @@ +"""Base components for data analysis. + +This module provides foundational classes and utilities that are shared across different +analyzers within the helper module. These components serve as the base for more specialized +data processing and analysis tasks. +""" + class BaseAnalyzer: + """A base class providing common functionalities for data analyzers. + + This class serves as a parent to various specialized analyzers, providing shared methods + and utility functions to assist in data analysis tasks. + + Methods: + to_dict: Returns a dictionary representation of the instance's attributes. + """ def to_dict(self): + """Convert the analyzer's attributes to a dictionary format. + + Returns: + dict: A dictionary containing all attributes of the analyzer instance. + """ return self.__dict__ \ No newline at end of file diff --git a/brkraw/api/analyzer/dataarray.py b/brkraw/api/analyzer/dataarray.py index d435c89..882286e 100644 --- a/brkraw/api/analyzer/dataarray.py +++ b/brkraw/api/analyzer/dataarray.py @@ -1,21 +1,49 @@ +"""Data Array Analyzer Module. + +This module is dedicated to the analysis of data arrays, focusing on extracting and structuring +data array information from raw datasets. It provides functionalities to interpret and convert +data arrays into more accessible formats, complementing the broader data processing framework. +""" + from __future__ import annotations -from .base import BaseAnalyzer import numpy as np from copy import copy -from typing import TYPE_CHECKING, Union +from .base import BaseAnalyzer +from typing import TYPE_CHECKING if TYPE_CHECKING: from ..data import ScanInfo + from typing import Union from io import BufferedReader from zipfile import ZipExtFile class DataArrayAnalyzer(BaseAnalyzer): + """Analyzes specific data array information and returns structured data arrays and related metadata. 
+ + This analyzer takes raw data array inputs and processes them to extract significant array metadata, + such as data type and shape, and prepares the data array for further analytical processing. + + Args: + infoobj (ScanInfo): The information object containing metadata related to data arrays. + fileobj (Union[BufferedReader, ZipExtFile]): The file object from which the data array is read. + + Attributes: + slope (float): The scaling factor applied to the data array values. + offset (float): The offset added to the data array values. + dtype (type): The data type of the data array. + shape (list[int]): The dimensions of the data array. + shape_desc (list[str]): Descriptions of the data array dimensions. + """ def __init__(self, infoobj: 'ScanInfo', fileobj: Union[BufferedReader, ZipExtFile]): + """Initialize the DataArrayAnalyzer with an information object and a file object. + """ infoobj = copy(infoobj) self._parse_info(infoobj) self.buffer = fileobj def _parse_info(self, infoobj: 'ScanInfo'): + """Parse the information object to set the data array properties such as slope, offset, and data type. + """ if not hasattr(infoobj, 'dataarray'): raise AttributeError self.slope = infoobj.dataarray['slope'] @@ -27,10 +55,14 @@ def _parse_info(self, infoobj: 'ScanInfo'): self._calc_array_shape(infoobj) def _calc_array_shape(self, infoobj: 'ScanInfo'): + """Calculate and extend the shape and description of the data array based on frame group information. + """ self.shape.extend(infoobj.frame_group['shape'][:]) self.shape_desc.extend([fgid.replace('FG_', '').lower() for fgid in infoobj.frame_group['id']]) def get_dataarray(self): + """Read and return the structured data array from the buffer, applying data type and shape transformations. + """ self.buffer.seek(0) return np.frombuffer(self.buffer.read(), self.dtype).reshape(self.shape, order='F') diff --git a/brkraw/api/analyzer/scaninfo.py b/brkraw/api/analyzer/scaninfo.py index 14cf84a..3560406 100644 --- a/brkraw/api/analyzer/scaninfo.py +++ b/brkraw/api/analyzer/scaninfo.py @@ -1,3 +1,10 @@ +"""Scan information analysis module. + +This module defines the ScanInfoAnalyzer, which is essential for parsing and interpreting +metadata from multiple parameter files, making it more human-readable and accessible +for further processing and analysis tasks. +""" + from __future__ import annotations from collections import OrderedDict from brkraw.api import helper @@ -10,18 +17,27 @@ class ScanInfoAnalyzer(BaseAnalyzer): """Helps parse metadata from multiple parameter files to make it more human-readable. + This analyzer is crucial for reconstructing and interpreting various scan parameters + from raw dataset files, supporting enhanced data insights and accessibility. + Args: - pvobj (PvScan): The PvScan object containing acquisition and method parameters. - reco_id (int, optional): The reconstruction ID. Defaults to None. + pvobj (Union[PvScan, PvReco, PvFiles]): The PvObject containing various acquisition + and method parameters. + reco_id (int, optional): Specifies the reconstruction ID for targeted analysis. + Defaults to None. + debug (bool): Flag to enable debugging outputs for detailed tracing. - Raises: - NotImplementedError: If an operation is not implemented. + Attributes: + info_protocol (dict): Stores protocol-related information. + info_fid (dict): Contains information extracted from FID files. + visu_pars (OrderedDict): Visualization parameters extracted for analysis. 
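+
+    Example:
+        A hypothetical usage sketch (the `pvscan` object and reco ID here are
+        assumptions, not part of this changeset):
+
+            >>> analyzer = ScanInfoAnalyzer(pvscan, reco_id=1)
+            >>> image_info = analyzer.get('info_image')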
""" def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id:Optional[int] = None, debug:bool = False): - + """Initialize the ScanInfoAnalyzer with specified parameters and optionally in debug mode. + """ self._set_pars(pvobj, reco_id) if not debug: self.info_protocol = helper.Protocol(self).get_info() @@ -30,6 +46,7 @@ def __init__(self, self._parse_info() def _set_pars(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id: Optional[int]): + """Set parameters from the PvObject for internal use.""" for p in ['acqp', 'method']: try: vals = getattr(pvobj, p) @@ -49,6 +66,8 @@ def _set_pars(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id: Option setattr(self, 'visu_pars', visu_pars) def _parse_info(self): + """Parse and process detailed information from the visualization parameters and other sources. + """ self.info_dataarray = helper.DataArray(self).get_info() self.info_frame_group = helper.FrameGroup(self).get_info() self.info_image = helper.Image(self).get_info() @@ -59,7 +78,11 @@ def _parse_info(self): self.info_orientation = helper.Orientation(self).get_info() def __dir__(self): + """List dynamic attributes of the instance related to informational properties. + """ return [attr for attr in self.__dict__.keys() if 'info_' in attr] def get(self, key): + """Retrieve information properties based on a specified key. + """ return getattr(self, key) if key in self.__dir__() else None \ No newline at end of file From 85cd3920e0bc4cba7a53508fa3793b3180cb08d9 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 2 May 2024 21:55:36 -0400 Subject: [PATCH 06/16] refactor(data): Standardize path operations and fix study info persistence - Integrate `PathResolver` as a parent class across the `__init__.py` and `study.py` modules to standardize and normalize all path operations using `_resolve(path)`. - Fix a critical bug in `study.py` where study information was lost after execution. Now, study info persists correctly, enhancing data reliability for MRI scans and raw dataset sessions. - Ensure consistent and accurate path handling and data retention across the module. --- brkraw/api/data/__init__.py | 2 +- brkraw/api/data/study.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/brkraw/api/data/__init__.py b/brkraw/api/data/__init__.py index 4ba69e0..d340e41 100644 --- a/brkraw/api/data/__init__.py +++ b/brkraw/api/data/__init__.py @@ -13,7 +13,7 @@ making the package easier to use and integrate into larger projects or applications. Example: - from your_package_name import Study, Scan, ScanInfo + from brkraw.api.data import Study, Scan, ScanInfo This enables straightforward access to these classes for further development and deployment in MRI data analysis tasks. """ diff --git a/brkraw/api/data/study.py b/brkraw/api/data/study.py index d3d6fc9..15cf738 100644 --- a/brkraw/api/data/study.py +++ b/brkraw/api/data/study.py @@ -76,7 +76,7 @@ def __init__(self, path: Path) -> None: Args: path (Path): The file system path to the study data. 
""" - super().__init__(path) + super().__init__(self._resolve(path)) self._parse_header() def get_scan(self, @@ -126,14 +126,14 @@ def avail(self) -> list: @property def info(self) -> dict: - if hasattr(self, '_info'): - return self._stream_info() - else: + if not hasattr(self, '_info'): self._process_header() - return self._stream_info() + if not hasattr(self, '_streamed_info'): + self._streamed_info = self._stream_info() + return self._streamed_info def _stream_info(self): - stream = self._info.__dict__ + stream = copy(self._info.__dict__) scans = {} for s in self._info.scans: scans[s.scan_id] = s.header From 9ede1f08ca60206c0a33021e84461dc12eeab0ac Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 2 May 2024 22:17:37 -0400 Subject: [PATCH 07/16] fix(parameters): Resolve type definition error under TYPE_CHECKING condition - Correct the misplaced type annotation that caused a 'type not found' error when the TYPE_CHECKING condition was active. - Ensure type annotations are correctly placed and accessible, preventing runtime errors in the parameters module. --- brkraw/api/config/fetcher/snippets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/brkraw/api/config/fetcher/snippets.py b/brkraw/api/config/fetcher/snippets.py index 754b9b6..128e029 100644 --- a/brkraw/api/config/fetcher/snippets.py +++ b/brkraw/api/config/fetcher/snippets.py @@ -38,8 +38,8 @@ class Snippets(Fetcher): _fetched: bool = False _template: List[Snippet] = [] _remote_snippets: List[Snippet] = [] - _local_snippets: List = [Snippet] - _template_snippets: List = [Snippet] + _local_snippets: List[Snippet] = [] + _template_snippets: List[Snippet] = [] def __init__(self, repos: dict, From 13c0402b638fe38a886dbd02d671bb1247525007 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 2 May 2024 22:43:18 -0400 Subject: [PATCH 08/16] docs(pvobj): Refine and standardize docstrings across module classes - Enhanced docstrings in `PvStudy`, `PvScan`, `PvReco`, and `PvFiles` classes for better clarity and consistency. - Improved method descriptions, parameter details, and added examples where missing. --- brkraw/api/pvobj/__init__.py | 16 +++ brkraw/api/pvobj/base.py | 134 ++++++++++++++++++++------ brkraw/api/pvobj/parameters.py | 171 +++++++++++++++++++++++---------- brkraw/api/pvobj/parser.py | 127 +++++++----------------- brkraw/api/pvobj/pvfiles.py | 73 +++++++++++--- brkraw/api/pvobj/pvreco.py | 89 +++++++++++------ brkraw/api/pvobj/pvscan.py | 111 ++++++++++++--------- brkraw/api/pvobj/pvstudy.py | 132 +++++++++++++------------ 8 files changed, 534 insertions(+), 319 deletions(-) diff --git a/brkraw/api/pvobj/__init__.py b/brkraw/api/pvobj/__init__.py index bbb04a0..74bd082 100755 --- a/brkraw/api/pvobj/__init__.py +++ b/brkraw/api/pvobj/__init__.py @@ -1,3 +1,19 @@ +"""Initialization for the pvobj module. + +This module is a cornerstone for interfacing with raw datasets within the Bruker imaging framework. +It provides essential classes for parsing raw datasets, managing parameter metadata, and organizing +data at various levels—from individual scans to comprehensive experimental sessions. + +Classes Exposed: + PvStudy: Manages data for an entire session, encapsulating all scans and reconstructions. + PvScan: Handles data related to individual scans, including raw FIDs, acquisition, and method parameters. + PvReco: Manages data related to image reconstructions within a single scan. 
+ PvFiles: Provides a flexible container for raw files that may not be systematically organized, + allowing users to add any files and utilize full module functionalities if all required files are present. + Parameter: Represents parameter metadata for various components within a scan. + Parser: Facilitates the parsing of raw dataset information into structured formats. +""" + from .pvstudy import PvStudy from .pvscan import PvScan from .pvreco import PvReco diff --git a/brkraw/api/pvobj/base.py b/brkraw/api/pvobj/base.py index 6576be1..c24356b 100644 --- a/brkraw/api/pvobj/base.py +++ b/brkraw/api/pvobj/base.py @@ -1,56 +1,87 @@ +"""Base functionality for handling buffer and method operations in pvobj. + +This module defines core classes that offer foundational utilities for managing and processing raw datasets. +The classes provide methods for handling file operations, such as opening and closing file buffers, fetching +directory structures, and more, all while using an object-oriented approach to maintain and access these datasets. + +Classes: + BaseBufferHandler: Manages file buffer operations, ensuring proper opening, closing, and context management of file streams. + BaseMethods: Extends BaseBufferHandler to include various file and directory handling methods necessary + for accessing and managing dataset contents. +""" + from __future__ import annotations import os import zipfile -from collections import OrderedDict -from collections import defaultdict -from typing import TYPE_CHECKING +from collections import OrderedDict, defaultdict from pathlib import Path from .parameters import Parameter - +from brkraw.api.util.package import PathResolver +from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Optional, Union, List - from io import BufferedReader from zipfile import ZipExtFile + from io import BufferedReader + +class BaseBufferHandler(PathResolver): + """Handles buffer management for file operations, ensuring all file streams are properly managed. -class BaseBufferHandler: + This class provides context management for file buffers, allowing for easy and safe opening and closing + of file streams. It ensures that all buffers are closed when no longer needed, preventing resource leakage. + + Attributes: + _buffers (Union[List[BufferedReader], List[ZipExtFile]]): A list of file buffer objects. + """ _buffers: Union[List[BufferedReader], List[ZipExtFile]] = [] def close(self): + """Closes all open file buffers managed by this handler.""" if self._buffers: for b in self._buffers: if not b.closed: b.close() def __enter__(self): + """Enters the runtime context related to this object.""" return self def __exit__(self, exc_type, exc_val, exc_tb): + """Exits the runtime context and closes the file buffers, handling any exceptions.""" self.close() - # Return False to propagate exceptions, if any return False -class BaseMethods: - """ - The `BaseMethods` class provides internal method for PvObjects. +class BaseMethods(BaseBufferHandler): + """Provides utility methods for handling files and directories within PvObjects. - Explanation: - This class contains various methods for handling files and directories, including fetching directory structure, - fetching zip file contents, opening files as file objects or strings, retrieving values associated with keys, and setting configuration options. + This class offers methods to fetch directory structures, handle zip file contents, and open files either + as file objects or as readable strings. 
It also provides a property to access the contents of directories + and zip files, tailored to the needs of managing Bruker raw datasets. - Args: - **kwargs: Keyword arguments for configuration options. - - Returns: - None + Attributes: + _scan_id (Optional[int]): The identifier for a specific scan, used in file path resolutions. + _reco_id (Optional[int]): The identifier for a specific reconstruction, used in file path resolutions. + _path (Optional[Path]): The base path for file operations. + _rootpath (Optional[Path]): The root path of the dataset, used for resolving relative paths. + _contents (Optional[dict]): A structured dictionary containing directory and file details. """ - _scan_id = None - _reco_id = None - _path = None - _rootpath = None - _contents = None + _scan_id: int = None + _reco_id: int = None + _path: 'Path' = None + _rootpath: 'Path' = None + _contents: 'Path' = None - def isinstance(self, name): + def isinstance(self, name: str): + """Check if the class name matches the provided string. + + This method compares the class name of the current instance with a given string to determine if they match. + + Args: + name (str): The class name to check against the instance's class name. + + Returns: + bool: True if the given name matches the instance's class name, otherwise False. + """ return self.__class__.__name__ == name @staticmethod @@ -105,7 +136,7 @@ def _fetch_zip(path: 'Path'): contents[dirpath]['dirs'].add(dirname) return contents - def _open_as_fileobject(self, key): + def _open_as_fileobject(self, key: str): """Opens a file object for the given key. Args: @@ -139,7 +170,7 @@ def _open_as_fileobject(self, key): path = os.path.join(*path_list) return open(path, 'rb') - def _open_as_string(self, key): + def _open_as_string(self, key: str): """Opens a file as binary, decodes it as UTF-8, and splits it into lines. Args: @@ -166,7 +197,7 @@ def __getitem__(self, key): """ return self.__getattr__(key) - def __getattr__(self, key): + def __getattr__(self, key: str): """ Get attribute by name. @@ -195,9 +226,32 @@ def __getattr__(self, key): @property def contents(self): + """Access the contents dictionary holding directory and file details. + + This property provides access to a structured dictionary that organizes directory and file information, + facilitating file operations across the class methods. + + Returns: + dict: The contents dictionary with details about directories and files. + """ return self._contents def get_fid(self, scan_id:Optional[int] = None): + """Retrieve the file object for the 'fid' or 'rawdata.job0' file from the dataset. + + This method attempts to fetch the 'fid' file commonly used in imaging datasets. If 'fid' is not found, + it tries 'rawdata.job0'. It uses internal methods to navigate through dataset structures based on provided scan ID. + + Args: + scan_id (Optional[int]): The identifier for the scan. Necessary if the class structure requires it to fetch data. + + Returns: + BufferedReader: The file object for the 'fid' or 'rawdata.job0'. + + Raises: + TypeError: If 'scan_id' is required but not provided. + FileNotFoundError: If neither 'fid' nor 'rawdata.job0' files are found in the dataset. 
+ """ try: pvobj = self.get_scan(scan_id) if hasattr(self, 'get_scan') else self except KeyError: @@ -210,6 +264,21 @@ def get_fid(self, scan_id:Optional[int] = None): "Please check the dataset and ensure the file is in the expected location.") def get_2dseq(self, scan_id:Optional[int] = None, reco_id:Optional[int] = None): + """Retrieve the '2dseq' file from the dataset for a specific scan and reconstruction. + + This method navigates through the dataset structure to fetch the '2dseq' file, a common data file in imaging datasets. + + Args: + scan_id (Optional[int]): The scan ID to navigate to the correct scan. Required if the dataset structure is hierarchical. + reco_id (Optional[int]): The reconstruction ID. Required if multiple reconstructions exist and are not specified. + + Returns: + BufferedReader: The file object for the '2dseq'. + + Raises: + TypeError: If necessary IDs are not provided. + FileNotFoundError: If the '2dseq' file is not found in the dataset. + """ try: if scan_id and hasattr(self, 'get_scan'): pvobj = self.get_scan(scan_id).get_reco(reco_id) @@ -233,7 +302,16 @@ def get_2dseq(self, scan_id:Optional[int] = None, reco_id:Optional[int] = None): "Please check the dataset and ensure the file is in the expected location.") @staticmethod - def _is_binary(fileobj, bytes=512): + def _is_binary(fileobj: BufferedReader, bytes: int = 512): + """Determine if a file is binary by reading a block of data. + + Args: + fileobj (BufferedReader): The file object to check. + bytes (int): Number of bytes to read for the check. + + Returns: + bool: True if the file contains binary data, otherwise False. + """ block = fileobj.read(bytes) fileobj.seek(0) return b'\x00' in block \ No newline at end of file diff --git a/brkraw/api/pvobj/parameters.py b/brkraw/api/pvobj/parameters.py index d50eb45..777730e 100644 --- a/brkraw/api/pvobj/parameters.py +++ b/brkraw/api/pvobj/parameters.py @@ -1,31 +1,57 @@ +"""Provides functionality for parsing and managing parameter metadata within Paravision datasets. + +This module includes the `Parameter` class, which extends the functionalities of a generic `Parser` class. +It specifically handles the extraction and management of parameter data and header information from strings +that represent parameter dictionaries in Paravision datasets. +These capabilities are critical for accessing and manipulating the underlying data in a structured and interpretable format. + +Classes: + Parameter: A class designed to parse and manage parameter dictionaries, providing access to parameters and headers, + processing content data, and setting parameter values based on input data. + +Dependencies: + re: Regular expression operations for parsing and processing text. + numpy: Provides support for large, multi-dimensional arrays and matrices, + along with a large collection of high-level mathematical functions to operate on these arrays. + OrderedDict: A dictionary subclass that remembers the order in which its contents are added, + used for maintaining an ordered set of parameters. +""" + +from __future__ import annotations import re import numpy as np from collections import OrderedDict from .parser import Parser, ptrn_comment, PARAMETER, HEADER +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Optional + from typing import List + from numpy.typing import NDArray class Parameter: - """ - Paravision Parameter object + """Handles the parsing and management of parameter data for Paravision experiments. 
- This class extends the Parser class and provides methods to initialize the object with a stringlist of parameter dictionaries, retrieve the parameters and headers, and process the contents of the data. + This class extends the Parser class, utilizing its functionalities to interpret a list of string + representations of parameter dictionaries, manage parameter and header information, and process the contents of the data. Args: - stringlist: A list of strings containing the parameter dictionaries. - - Examples: - >>> stringlist = ["param1", "param2"] - >>> parameter = Parameter(stringlist) + stringlist (List[str]): A list of strings containing parameter entries. + name (str): The name identifying the parser object. + scan_id (Optional[int]): The scan ID associated with the parameter data. + reco_id (Optional[int]): The reconstruction ID associated with the parameter data. Attributes: - parameters (property): Get the parameters of the data. - headers (property): Get the headers of the data. - - Methods: - _process_contents: Process the contents of the data based on the given parameters. - _set_param: Set the parameters and headers based on the given data. + _parameters (OrderedDict): Stores parameter values. + _header (OrderedDict): Stores header information. + _name (str): Name of the parser object. + _repr_items (List[str]): List of string representations for object description. """ - def __init__(self, stringlist, name, scan_id=None, reco_id=None): + def __init__(self, + stringlist: List[str], + name: str, + scan_id: Optional[int] = None, + reco_id: Optional[int] = None): """ Initialize the Parameter object with the given stringlist, name, scan_id, and reco_id. @@ -52,52 +78,50 @@ def __init__(self, stringlist, name, scan_id=None, reco_id=None): @property def name(self): + """Get a formatted name of the parser object, capitalizing each part separated by underscores. + + Returns: + str: A capitalized version of the name attribute. + """ if '_' in self._name: return ''.join([s.capitalize() for s in self._name.split('_')]) return self._name.capitalize() @property def parameters(self): - """ - Get the parameters of the data. + """Retrieve the parameters processed by the parser. Returns: - OrderedDict: The parameters of the data. - - Examples: - This property can be accessed directly on an instance of the class to retrieve the parameters. + OrderedDict: A dictionary containing the parameters of the data. """ return self._parameters @property def header(self): - """ - Get the headers of the data. + """Retrieve the headers processed by the parser. Returns: - OrderedDict: The headers of the data. - - Examples: - This property can be accessed directly on an instance of the class to retrieve the headers. + OrderedDict: A dictionary containing the headers of the data. """ return self._header - def _process_contents(self, contents, addr, addr_diff, index, value): - """ - Process the contents of the data based on the given parameters. + def _process_contents(self, + contents: List[str], + addr: int, + addr_diff: NDArray, + index: int, + value: str): + """Process the data contents based on parameter addresses and differences. Args: - contents: The contents of the data. - addr: The address of the current parameter. - addr_diff: The difference in addresses between parameters. - index: The index of the current parameter. - value: The value of the current parameter. + contents (List[str]): The full list of content strings. + addr (int): The current parameter's address in contents. 
+            addr_diff (numpy.ndarray): An array of address differences between parameters.
+            index (int): The index of the current parameter.
+            value (str): The initial value of the parameter.
 
         Returns:
-            tuple: A tuple containing the processed data and its shape.
-
-        Examples:
-            This method is intended to be called internally within the class and does not have direct usage examples.
+            tuple: A tuple containing the processed data as a string and its shape or format as int.
         """
         if addr_diff[index] > 1:
             c_lines = contents[(addr + 1):(addr + addr_diff[index])]
@@ -105,20 +129,19 @@ def _process_contents(self, contents, addr, addr_diff, index, value):
             return (data, value) if data else (Parser.convert_string_to(value), -1)
         return Parser.convert_string_to(value), -1
 
-    def _set_param(self, params, param_addr, contents):
-        """
-        Set the parameters and headers based on the given data.
+    def _set_param(self,
+                   params: List[tuple],
+                   param_addr: List[int],
+                   contents: List[str]):
+        """Initialize parameters and headers from parsed data.
 
         Args:
-            params: A list of parameter information.
-            param_addr: The addresses of the parameters.
-            contents: The contents of the data.
+            params (List[tuple]): List containing parameter tuples (dtype, key, value).
+            param_addr (List[int]): List of addresses where parameters are located in the content.
+            contents (List[str]): The contents as a list of strings from which to extract data.
 
         Raises:
-            ValueError: If an invalid dtype is encountered.
-
-        Examples:
-            This method is intended to be called internally within the class and does not have direct usage examples.
+            ValueError: If an invalid data type (dtype) is encountered.
         """
         addr_diff = np.diff(param_addr)
         self._params_key_struct = params
@@ -136,26 +159,74 @@ def _set_param(self, params, param_addr, contents):
             raise ValueError("Invalid dtype encountered in '_set_param'")
 
     def __getitem__(self, key):
+        """Allows dictionary-like access to parameters.
+
+        Args:
+            key (str): The key for the desired parameter.
+
+        Returns:
+            The value associated with the key in the parameters dictionary.
+        """
         return self.parameters[key]
     
     def __getattr__(self, key):
+        """Allows attribute-like access to parameters.
+
+        Args:
+            key (str): The key for the desired parameter.
+
+        Returns:
+            The value associated with the key in the parameters dictionary.
+        """
         return self.parameters[key]
     
     def __repr__(self):
+        """Provide a string representation of the Parameter object for debugging and logging.
+
+        Returns:
+            str: A string representation of the object.
+        """
         return f"{self.name}({', '.join(self._repr_items)})"
 
     def keys(self):
+        """Get the keys of the parameters dictionary.
+
+        Returns:
+            KeysView: A view of the keys in the parameter dictionary.
+        """
         return self.parameters.keys()
     
     def values(self):
+        """Get the values of the parameters dictionary.
+
+        Returns:
+            ValuesView: A view of the values in the parameter dictionary.
+        """
         return self.parameters.values()
    
+    def items(self):
+        """Get the key and value pairs of the parameters dictionary.
+
+        Returns:
+            ItemsView: A view of the key-value pairs in the parameter dictionary.
+        """
+        return self.parameters.items()
+
-    def get(self, key):
+    def get(self, key: str):
+        """Get the value of a parameter by key, returning None if the key is not found.
+
+        Args:
+            key (str): The key for the desired parameter.
+
+        Returns:
+            The value associated with the key if it exists, otherwise None.
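+
+        Example (illustrative; assumes an `acqp` Parameter object has been loaded):
+
+            >>> acqp.get('ACQ_protocol_name')
+            'FLASH'
+            >>> acqp.get('not_a_key') is None
+            True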
+ """ if key in self.keys(): return self.parameters[key] else: return None def is_parameter(self): + """True if data successfully loaded""" return True if self.header else False \ No newline at end of file diff --git a/brkraw/api/pvobj/parser.py b/brkraw/api/pvobj/parser.py index 59225b1..40db6bf 100755 --- a/brkraw/api/pvobj/parser.py +++ b/brkraw/api/pvobj/parser.py @@ -1,3 +1,15 @@ +"""Provides parsing utilities for handling and converting parameter data from string representations to structured formats. + +This module includes the `Parser` class, which leverages regular expressions to accurately parse and convert various +data types found in parameter files, such as integers, floats, complex arrays, and strings. +The functionality is designed to support the manipulation and analysis of data from Paravision parameter files, +ensuring data integrity and accessibility. + +Classes: + Parser: A class that offers a comprehensive suite of methods for parsing parameter data, + supporting complex data structures and providing tools to convert and clean data efficiently. +""" + import re import numpy as np from collections import OrderedDict, defaultdict @@ -25,34 +37,31 @@ class Parser: - """ - Parser class for handling parameter dictionaries. + """A utility class for parsing and converting parameter data from string representations. - This class provides methods for loading parameters from a list of strings, converting strings to specific data types, cleaning up array elements, processing complex arrays, parsing shapes, parsing data, parsing array data, and converting data to specified shapes. + The Parser class uses regular expressions to identify and convert data types found in parameter files. It handles typical data formats including integers, floats, strings, and complex arrays, making them amenable for further processing and analysis. Methods: - load_param: JCAMP DX parser that loads parameters from a list of strings. - convert_string_to: Converts a string to a specific data type if it matches certain patterns. - clean_up_elements_in_array: Cleans up array elements by replacing patterns with repeated values. - process_bisarray: Determines the case of an array with BIS prefix by converting each element to a specific data type. - process_complexarray: Process a complex array and return a parsed dictionary. - process_string: Process a string and return the parsed data based on its shape. - parse_shape: Parse the shape of the data. - parse_data: Parse the data based on its format. - parse_array_data: Parse the array data. - convert_data_to: Convert the given data to the specified shape. + load_param(stringlist): Parses parameters from a list of strings, identifying headers and parameters. + convert_string_to(string): Converts strings to appropriate data types based on their content. + clean_up_elements_in_array(data): Cleans array elements by handling patterns and replacing them with repeated values. + process_complexarray(data): Converts complex nested array strings into structured dictionary formats. + parse_shape(shape): Interprets textual shape descriptions into tuple or list formats. + parse_data(data): Converts string data into lists or single values depending on the structure. + convert_data_to(data, shape): Transforms data into the specified shape or data type. """ @staticmethod def load_param(stringlist): - """JCAMP DX parser that loads parameters from a list of strings. + """Parses parameters from a list of string representations of a JCAMP DX file. 
+ + Each string is inspected for key-value pairs that represent parameters or headers. + This method categorizes and stores them accordingly. Args: - stringlist (list): A list of strings containing parameter information. + stringlist (list[str]): A list of strings, each containing a line from a JCAMP DX file. Returns: - params (OrderedDict): An ordered dictionary containing the parsed parameters, where the key is the line number and the value is a tuple of the parameter type, key, and value. - param_addresses (list): A list of line numbers where parameters were found. - stringlist (list): The original list of strings. + tuple: A tuple containing an OrderedDict of parameters, a list of line numbers where parameters are found, and the original list of strings. """ params = OrderedDict() param_addresses = [] @@ -74,13 +83,13 @@ def load_param(stringlist): @staticmethod def convert_string_to(string): - """Converts a string to a specific data type if it matches certain patterns. + """Converts a string to an integer, float, or string based on its content, using regular expression matching. Args: string (str): The string to be converted. Returns: - float, int, or str or None: The converted value of the string, or None if the string is empty. + int, float, str, or None: The converted value of the string, or None if the string is empty. """ string = string.strip() if re.match(ptrn_string, string): @@ -137,19 +146,13 @@ def process_bisarray(elements, shape): @staticmethod def process_complexarray(data): - """ - Process a complex array and return a parsed dictionary. + """Processes a string representation of a complex nested array and converts it into a structured dictionary format. Args: - data: The complex array to be processed. + data (str): The complex array string to be processed. Returns: - dict: A dictionary containing the parsed data. - - Examples: - >>> data = [1, [2, 3], [[4, 5], [6, 7]]] - >>> process_complexarray(data) - {'level_1': [[1]], 'level_2': [[2, 3]], 'level_3': [[4, 5], [6, 7]]} + dict: A dictionary representing the structured levels of the array, categorized by depth. """ data_holder = copy(data) parser = defaultdict(list) @@ -164,8 +167,7 @@ def process_complexarray(data): @staticmethod def process_string(data, shape): - """ - Process a string and return the parsed data based on its shape. + """Process a string and return the parsed data based on its shape. Args: data: The string to be processed. @@ -173,17 +175,6 @@ def process_string(data, shape): Returns: tuple: A tuple containing the parsed data and an empty string, or the processed string. - - Examples: - >>> data = "[1, 2, 3]" - >>> shape = "(3,)" - >>> process_string(data, shape) - ([1, 2, 3], '') - - >>> data = "Hello, World!" - >>> shape = "" - >>> process_string(data, shape) - 'Hello, World!' """ shape = Parser.parse_shape(shape) if elements := re.findall(ptrn_bisstring, data): @@ -201,8 +192,7 @@ def process_string(data, shape): @staticmethod def parse_shape(shape): - """ - Parse the shape of the data. + """Parse the shape of the data. Args: shape: The shape of the data. @@ -212,23 +202,6 @@ def parse_shape(shape): Raises: ValueError: If the shape is invalid. 
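+
+        Example (kept from the original doctests as a minimal illustration):
+
+            >>> Parser.parse_shape("(3, 4)")
+            '3, 4'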
-
-        Examples:
-        >>> shape = "(3, 4)"
-        >>> parse_shape(shape)
-        '3, 4'
-
-        >>> shape = "3, 4"
-        >>> parse_shape(shape)
-        '3, 4'
-
-        >>> shape = "(3, 4, 5)"
-        >>> parse_shape(shape)
-        '3, 4, 5'
-
-        >>> shape = "(3, 4,)"
-        >>> parse_shape(shape)
-        ValueError: Invalid shape: (3, 4,)
         """
         if shape != -1:
             shape = re.sub(ptrn_array, r'\g<1>', shape)
@@ -238,31 +211,13 @@
     @staticmethod
     def parse_data(data):
-        """
-        Parse the data based on its format.
+        """Parse the data based on its format.
 
         Args:
             data: The data to be parsed.
 
         Returns:
             list or str: The parsed data.
-
-        Examples:
-        >>> data = "[1, 2, 3]"
-        >>> parse_data(data)
-        [1, 2, 3]
-
-        >>> data = "1, 2, 3"
-        >>> parse_data(data)
-        [1, 2, 3]
-
-        >>> data = "1 2 3"
-        >>> parse_data(data)
-        [1, 2, 3]
-
-        >>> data = "Hello, World!"
-        >>> parse_data(data)
-        'Hello, World!'
         """
         if matched := re.findall(ptrn_array, data):
             return Parser.parse_array_data(matched)
@@ -274,17 +229,13 @@
     @staticmethod
     def parse_array_data(matched):
-        """
-        Parse the array data.
+        """Parse the array data.
 
         Args:
             matched: A list of strings representing the matched array data.
 
         Returns:
             list: The parsed array data.
-
-        Examples:
-            This method is intended to be called internally within the class and does not have direct usage examples.
         """
         if any(',' in cell for cell in matched):
             return [[Parser.convert_string_to(c) for c in cell.split(',')] for cell in matched]
@@ -292,8 +243,7 @@
     @staticmethod
     def convert_data_to(data, shape):
-        """
-        Convert the given data to the specified shape.
+        """Convert the given data to the specified shape.
 
         Args:
             data: The data to be converted.
@@ -301,9 +251,6 @@
         Returns:
             object: The converted data.
-
-        Examples:
-            This method is intended to be called internally within the class and does not have direct usage examples.
         """
         if isinstance(data, str):
             data, shape = Parser.process_string(data, shape)
diff --git a/brkraw/api/pvobj/pvfiles.py b/brkraw/api/pvobj/pvfiles.py
index 931cb5b..6d519d5 100644
--- a/brkraw/api/pvobj/pvfiles.py
+++ b/brkraw/api/pvobj/pvfiles.py
@@ -1,49 +1,94 @@
-import os
+"""Provides the PvFiles class for managing individual files within a Paravision dataset.
+
+This module includes the PvFiles class, derived from BaseMethods, specifically tailored to manage non-standard or loosely organized files within a dataset. It offers functionalities for dynamically handling arbitrary file inputs, making it versatile for datasets that do not conform to standard directory structures typically expected in Paravision studies.
+
+Classes:
+    PvFiles: Manages individual file access and operations, providing methods to handle arbitrary files efficiently and effectively. This class is especially useful for datasets that require flexible file management strategies.
+"""
+
+from __future__ import annotations
 from .base import BaseMethods
-from pathlib import Path
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from pathlib import Path
 
 class PvFiles(BaseMethods):
+    """Manages arbitrary files within a Paravision dataset, providing flexible file handling capabilities.
+
+    This class extends BaseMethods to provide specialized handling of files that may not necessarily fit into
+    a structured directory or standardized dataset format. It is particularly useful for datasets where files
+    are spread across different locations or need to be accessed without a fixed directory structure.
+
+    Attributes:
+        _path (list): A list of resolved file paths that are currently managed by this instance.
+        _contents (dict): A dictionary representing the contents currently available in this instance.
+    """
     def __init__(self, *files: Path):
-        """_summary_
+        """Initializes the PvFiles object with one or more files.
 
         Args:
-            data_path (str): path of '2dseq' file in reco_dir
-            pars_path (str): path of 'visu_pars' file in reco_dir
+            *files (Path): An arbitrary number of Path objects pointing to the files to be managed.
         """
         self.update(*files)
 
     def update(self, *files: Path):
-        self._path = [os.path.abspath(f) for f in files if os.path.exists(f)]
-        self._contents = {"files": [os.path.basename(f) for f in self._path],
+        """Updates the managed files in the PvFiles instance.
+
+        Args:
+            *files (Path): An arbitrary number of Path objects pointing to the files to be managed.
+
+        Notes:
+            This method updates the list of file paths and the contents dictionary based on the files provided.
+        """
+        self._path = [self._resolve(f) for f in files if self._resolve(f).exists()]
+        self._contents = {"files": [f.name for f in self._path],
                           "dirs": [],
                           "file_indexes": []}
 
-    def _open_as_fileobject(self, key):
-        """Override open_as_fileobject method
+    def _open_as_fileobject(self, key: str):
+        """Opens a file as a file object based on the specified key.
 
         Args:
-            key: The key to identify the file.
+            key (str): The key or part of the file name to identify the file to open.
 
         Returns:
-            file object: The opened file object.
+            file object: The opened file object corresponding to the key.
 
         Raises:
-            ValueError: If the key does not exist in the files.
+            KeyError: If the file corresponding to the key does not exist in the managed files.
         """
         if file_path := self._search_file_path(key):
             return open(file_path, 'rb')
         raise KeyError(f'Failed to find filename "{key}" from input files.\n [{self.contents.get("files")}]')
 
-    def _search_file_path(self, key):
+    def _search_file_path(self, key: str):
+        """Searches for a file path that includes the specified key.
+
+        Args:
+            key (str): A substring of the file name to search for among the managed files.
+
+        Returns:
+            Path or False: The full path of the file if found, False otherwise.
+        """
-        if files := [f for f in self._path if key in f]:
+        if files := [f for f in self._path if key in str(f)]:
             return files.pop()
         else:
             return False
 
     def get_visu_pars(self, _:None=None):
-        """ Mock function of PvScan """
+        """A mock function to mimic getting 'visu_pars', typically used for testing or compatibility.
+
+        Returns:
+            str: The contents of 'visu_pars' if it exists; mimics the behavior of similar functions in related classes.
+        """
         return getattr(self, 'visu_pars')
     
     @property
     def path(self):
+        """Returns the paths of the managed files.
+
+        Returns:
+            list: A list of file paths being managed by this instance.
+        """
         return self._path
diff --git a/brkraw/api/pvobj/pvreco.py b/brkraw/api/pvobj/pvreco.py
index 9440801..0656506 100644
--- a/brkraw/api/pvobj/pvreco.py
+++ b/brkraw/api/pvobj/pvreco.py
@@ -1,56 +1,77 @@
+"""Module providing the PvReco class, a component of Paravision Objects.
+
+The PvReco class is designed to manage individual reconstructions within a scan from Paravision datasets.
+It extends the BaseMethods class to incorporate more specific functionalities such as managing compressed data formats and
+directly handling the file paths and contents of reconstruction data.
+The class is particularly geared towards handling the details at the reconstruction level, enabling detailed management and
+access to specific types of imaging data.
It includes functionalities to initialize reconstructions, update their contents, +and provide access paths, ensuring that data can be accessed and manipulated efficiently and effectively. + +Classes: + PvReco: Manages the data and processes related to individual reconstructions within a Paravision scan, providing tools + to handle and organize the specific data associated with those reconstructions. +""" + +from __future__ import annotations import os import warnings from .base import BaseMethods +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Tuple, Dict + from typing import Optional + from pathlib import Path class PvReco(BaseMethods): - """ - A class representing a PvReco object. + """Manages the reconstruction-specific data within a scan in a Paravision study. - Inherits from BaseMethods. - - Attributes: - is_compressed (bool): Indicates if the dataset is compressed. + This class extends `BaseMethods` to provide specialized handling of the data associated with a particular + reconstruction. It supports both compressed and uncompressed data formats and provides utilities to manage + and access reconstruction-specific details. + Attributes: + is_compressed (bool): Indicates whether the dataset is compressed, affecting how files are accessed and processed. + path (str): The file system path to the reconstruction's data. + scan_id (int): Identifier for the scan associated with this reconstruction. + reco_id (int): Identifier for this specific reconstruction. + Args: scan_id (int): The ID of the scan. reco_id (int): The ID of the reconstruction. - pathes (tuple): A tuple containing the root path and the path. - contents (list): A list of contents. - - Properties: - path (str): The path. + pathes (Tuple[Path, Path]): Contains the root path and specific reconstruction path. + contents (Optional[Dict], optional): Initial content data for the reconstruction. """ - def __init__(self, scan_id, reco_id, pathes, contents): - """ - Initialize a Dataset object. + def __init__(self, scan_id: int, reco_id: int, pathes: Tuple['Path', 'Path'], + contents: Optional['Dict']=None): + """Initializes the PvReco object with specified identifiers, paths, and optional contents. Args: - scan_id (int): The ID of the scan. - reco_id (int): The ID of the reconstruction. - pathes (tuple): A tuple containing the root path and the path. - contents (list): A list of contents. - - Attributes: - _scan_id (int): The ID of the scan. - _reco_id (int): The ID of the reconstruction. - _rootpath (str): The root path. - _path (str): The path. - _contents (list): The list of contents. + scan_id (int): The identifier of the scan to which this reconstruction belongs. + reco_id (int): The unique identifier for this reconstruction within its scan. + pathes (Tuple[Path, Path]): A tuple containing the root path and the specific path for this reconstruction. + contents (Dict, optional): A dictionary representing the initial contents of the reconstruction. + + Raises: + FileNotFoundError: If the provided paths do not exist or are not accessible. + ValueError: If the paths provided do not lead to expected data formats or locations. """ self._scan_id = scan_id self._reco_id = reco_id - self._rootpath, self._path = pathes + self._rootpath = self._resolve(pathes[0]) + self._path = self._resolve(pathes[1]) self._contents = contents self.is_compressed = True if contents.get('file_indexes') else False @property def path(self): - """ - A property representing the path. 
+ """Constructs and returns the full filesystem path for this reconstruction. + + If the reconstruction data is compressed, this returns a tuple of paths; otherwise, + it combines them into a single filesystem path. Returns: - str: The path. + Union[Tuple[Path, Path], str]: The full path or paths to the reconstruction data. """ path = (self._rootpath, self._path) if self.is_compressed: @@ -58,5 +79,15 @@ def path(self): return os.path.join(*path) def get_fid(self): + """Issues a warning that the 'get_fid' method is not supported for PvReco objects. + + This method is typically used at the scan or study level, not at the reconstruction level. + + Returns: + None + + Raises: + Warning: Always warns that the method is not applicable for PvReco objects. + """ warnings.warn(f'{self.__class__} does not support get_fid method. use Scan- or Study-level object instead') return None \ No newline at end of file diff --git a/brkraw/api/pvobj/pvscan.py b/brkraw/api/pvobj/pvscan.py index 72e0aa3..00588f9 100644 --- a/brkraw/api/pvobj/pvscan.py +++ b/brkraw/api/pvobj/pvscan.py @@ -1,65 +1,68 @@ +"""Provides the PvScan class for managing individual scan data within a Paravision study. + +This module includes the PvScan class, derived from BaseMethods, to manage and interact with individual +scans and their respective reconstructions. It handles the organization, retrieval, and processing of scan-specific information, +supporting both compressed and uncompressed data formats. + +Classes: + PvScan: Manages a single scan's dataset, organizing reconstructions and handling specific data retrieval efficiently. +""" + from __future__ import annotations import os from collections import OrderedDict -from typing import Optional, Tuple, Dict, TYPE_CHECKING from .base import BaseMethods from .pvreco import PvReco +from typing import TYPE_CHECKING if TYPE_CHECKING: + from typing import Optional, Tuple, Dict from pathlib import Path class PvScan(BaseMethods): - """ - A class representing a PvScan object. + """Represents and manages an individual scan within a Paravision study dataset. - Inherits from BaseMethods. + Inherits from BaseMethods to utilize general methods for file handling and dataset validation. + Manages the data associated with a single scan, including various reconstructions, both compressed and uncompressed. Attributes: - is_compressed (bool): Indicates if the dataset is compressed. + is_compressed (bool): Indicates whether the scan's dataset is compressed, affecting how files are accessed and processed. + path (str): The file system path to the scan's dataset. + avail (list): A list of IDs representing the available reconstructions within the scan. + contents (dict): A structured dictionary representing the organized contents of the scan. Methods: - update(contents): Update the contents of the dataset. - set_reco(path, reco_id, contents): Set a reco object with the specified path, ID, and contents. - get_reco(reco_id): Get a specific reco object by ID. - - Properties: - path (str): The path. - avail (list): A list of available items. - contents (dict): A dictionary of pvscan contents. + update(contents): Updates the contents of the scan with new data. + set_reco(path, reco_id, contents): Initializes a PvReco object for a specific reconstruction. + get_reco(reco_id): Retrieves a PvReco object for a given reconstruction ID. """ def __init__(self, scan_id: Optional[int], pathes: Tuple[Path, Path], contents: Optional[Dict]=None, recos: Optional[OrderedDict]=None): - """ - Initialize a Dataset object. 
+        """Initializes a PvScan object with the specified scan ID, paths, and optional contents and reconstructions.
 
         Args:
             scan_id (int): The ID of the scan.
-            pathes (tuple): A tuple containing the root path and the path.
-            contents (dict, optional): The initial contents of the dataset. Defaults to None.
-            recos (dict, optional): A dictionary of reco objects. Defaults to None.
-
-        Attributes:
-            _scan_id (int): The ID of the scan.
-            _rootpath (str): The root path.
-            _path (str): The path.
-            _recos (OrderedDict): An ordered dictionary of reco objects.
-
-        Methods:
-            update(contents): Update the contents of the dataset.
+            pathes (tuple): A tuple containing the root path and the specific scan path.
+            contents (dict, optional): The initial contents of the scan's dataset. Defaults to None.
+            recos (OrderedDict, optional): A dictionary of PvReco objects. Defaults to None.
+
+        Raises:
+            FileNotFoundError: If the paths do not exist or are invalid.
+            ValueError: If the paths are neither directories nor recognizable compressed file formats.
         """
         self._scan_id = scan_id
-        self._rootpath, self._path = pathes
+        self._rootpath = self._resolve(pathes[0])
+        self._path = self._resolve(pathes[1])
         self.update(contents)
         self._recos = OrderedDict(recos) if recos else OrderedDict()
 
     def update(self, contents: Dict):
-        """
-        Update the contents of the dataset.
+        """Updates the contents of the scan's dataset.
 
         Args:
-            contents (dict): The new contents of the dataset.
+            contents (dict): The new contents to update the dataset with.
 
         Returns:
             None
@@ -69,13 +72,12 @@ def update(self, contents: Dict):
         self._contents = contents
 
     def set_reco(self, path: Path, reco_id: int, contents: Dict):
-        """
-        Set a reco object with the specified path, ID, and contents.
+        """Initializes and stores a PvReco object for a specific reconstruction within the scan.
 
         Args:
-            path (str): The path of the reco object.
-            reco_id (int): The ID of the reco object.
-            contents (list): The contents of the reco object.
+            path (Path): The path to the reconstruction data.
+            reco_id (int): The unique identifier for the reconstruction.
+            contents (Dict): The data associated with the reconstruction.
 
         Returns:
             None
         """
         self._recos[reco_id] = PvReco(self._scan_id, reco_id, (self._rootpath, path), contents)
 
     def get_reco(self, reco_id: int):
-        """
-        Get a specific reco object by ID.
+        """Retrieves the PvReco object associated with the specified reconstruction ID.
 
         Args:
-            reco_id (int): The ID of the reco object to retrieve.
+            reco_id (int): The ID of the reconstruction to retrieve.
 
         Returns:
-            object: The specified reco object.
+            PvReco: The reconstruction object.
 
         Raises:
-            KeyError: If the specified reco ID does not exist.
+            KeyError: If the specified reconstruction ID does not exist within the scan.
         """
         return self._recos[reco_id]
 
     def get_visu_pars(self, reco_id: Optional[int] = None):
+        """Retrieves visualization parameters ('visu_pars') for the scan or a specific reconstruction.
+
+        This method attempts to find and return the 'visu_pars' file. It looks for this file in the following order:
+        1. In a specific reconstruction, if `reco_id` is provided.
+        2. Directly within the scan's own contents, if available.
+        3. In the first available reconstruction that contains 'visu_pars'.
+
+        Args:
+            reco_id (Optional[int]): The ID of the reconstruction from which to retrieve 'visu_pars'. If None,
+                the method searches across the scan and all its reconstructions.
+ + Returns: + The visualization parameters as specified in 'visu_pars'. + + Raises: + FileNotFoundError: If 'visu_pars' cannot be found in the specified reconstruction, within the scan, + or across any of the available reconstructions. + """ if reco_id: return getattr(self.get_reco(reco_id), 'visu_pars') elif 'visu_pars' in self.contents['files']: @@ -111,11 +130,10 @@ def get_visu_pars(self, reco_id: Optional[int] = None): @property def path(self): - """ - A property representing the path. + """Provides the combined filesystem path of the scan's dataset. Returns: - str: The path. + str: The full path combining the root and specific scan path. """ path = (self._rootpath, self._path) if self.is_compressed: @@ -124,10 +142,9 @@ def path(self): @property def avail(self): - """ - A property representing the available items. + """Provides a list of available reconstruction IDs within the scan. Returns: - list: A list of available items. + list: A sorted list of available reconstruction IDs. """ return sorted(list(self._recos)) \ No newline at end of file diff --git a/brkraw/api/pvobj/pvstudy.py b/brkraw/api/pvobj/pvstudy.py index 7a1d717..fd69bc8 100755 --- a/brkraw/api/pvobj/pvstudy.py +++ b/brkraw/api/pvobj/pvstudy.py @@ -1,68 +1,67 @@ +"""Provides the PvStudy class, which serves as a comprehensive handler for entire Paravision study datasets. + +This module includes the PvStudy class, derived from BaseMethods, to manage and interact with datasets that may +include multiple scans and various data types, both compressed and uncompressed. It facilitates the organization, +retrieval, and processing of study-specific information and individual scans, enhancing the handling of complex +imaging data. + +Classes: + PvStudy: Manages an entire study's dataset, organizing scans and handling specific data retrieval efficiently. +""" + +from __future__ import annotations import re import zipfile from collections import OrderedDict -from pathlib import Path from .base import BaseMethods from .pvscan import PvScan +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from pathlib import Path class PvStudy(BaseMethods): - """A class representing a PvStudy object. + """Represents and manages an entire Paravision study dataset. - Inherits from BaseMethods. + Inherits from BaseMethods to utilize general methods for file handling and dataset validation. + Manages multiple scans and their respective data, supporting both compressed and uncompressed formats. Attributes: - is_compressed (bool): Indicates if the dataset is compressed. + is_compressed (bool): Indicates whether the dataset is compressed, affecting how files are accessed and processed. + path (str): The file system path to the study dataset. + avail (list): A list of IDs representing the available scans within the dataset. + contents (dict): A structured dictionary representing the organized contents of the dataset. Methods: - get_scan(scan_id): Get a specific scan object by ID. - - Properties: - path (str): The path of the object. - avail (list): A list of available scans. - contents (dict): A dictionary of pvdataset contents. + get_scan(scan_id): Retrieves a PvScan object for a given scan ID, facilitating detailed access to specific scans. """ def __init__(self, path: Path, debug: bool=False): - """Initialize the object with the given path and optional debug flag. + """Initializes a PvStudy object with the specified path and debug settings. Args: - path: The path to initialize the object with. 
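The three-step 'visu_pars' lookup shown earlier drives most downstream header handling, so a short sketch of the fallbacks may help; `scan` is a hypothetical PvScan with reconstruction 1 available:

    pars = scan.get_visu_pars(reco_id=1)  # 1) explicit reconstruction
    pars = scan.get_visu_pars()           # 2) scan-level 'visu_pars' if present,
                                          # 3) otherwise the first reco that has one
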
- debug: A flag indicating whether debug mode is enabled. - **kwargs: Additional keyword arguments. + path (Path): The filesystem path to the dataset. + debug (bool, optional): If set to True, enables debug mode which may affect logging and error reporting. Raises: - Any exceptions raised by _check_dataset_validity or _construct methods. - - Notes: - If 'pvstudy' is present in kwargs, it will be used to initialize the object via super(). - - Examples: - obj = ClassName(path='/path/to/dataset', debug=True) + FileNotFoundError: If the path does not exist or is invalid. + ValueError: If the path is neither a directory nor a recognizable compressed file format. """ - if not debug: - self._check_dataset_validity(path) + self._check_dataset_validity(self._resolve(path)) self._construct() # internal method def _check_dataset_validity(self, path: Path): - """Checks the validity of a given dataset path. - - Note: This method only checks the validity of the dataset to be fetched using `fetch_dir` and `fetch_zip`, - and does not check the validity of a `PvStudy`. + """Validates the provided path to ensure it points to a viable dataset. Args: - path (str): The path to check. + path (Path): The path to validate. Raises: FileNotFoundError: If the path does not exist. - ValueError: If the path is not a directory or a file, or if it does not meet the required criteria. - - Returns: - None + ValueError: If the path is neither a directory nor a valid compressed file. """ - path = Path(path) if isinstance(path, str) else path - self._path: Path = path.absolute() + self._path = path if not self._path.exists(): raise FileNotFoundError(f"The path '{self._path}' does not exist.") if self._path.is_dir(): @@ -75,17 +74,9 @@ def _check_dataset_validity(self, path: Path): raise ValueError(f"The path '{self._path}' does not meet the required criteria.") def _construct(self): - """Constructs the object by organizing the contents. + """Organizes the dataset contents by parsing directories and files, structuring them for easy access. - This method constructs the object by organizing the contents based on the provided directory structure. - It iterates over the sorted contents and updates the `_scans` and `_backup` dictionaries accordingly. - After processing, it removes the processed paths from the `_contents` dictionary. - - Args: - **kwargs: keyword argument for datatype specification. - - Returns: - None + Processes directories to segregate scans and their respective data, handling both uncompressed and compressed datasets. """ self._scans = OrderedDict() self._backup = OrderedDict() @@ -111,15 +102,6 @@ def _process_childobj(self, matched, item): Returns: str: The path of the processed child object. - - Raises: - None. - - Examples: - # Example usage of _process_childobj - matched = re.match(pattern, input_string) - item = ('path/to/child', {'dirs': set(), 'files': [], 'file_indexes': []}) - result = obj._process_childobj(matched, item, pvscan={'binary_files': [], 'parameter_files': ['method', 'acqp', 'visu_pars']}) """ path, contents = item scan_id = int(matched.group(1)) @@ -136,11 +118,34 @@ def _process_childobj(self, matched, item): @property def contents(self): + """Retrieves the contents of the study that include 'subject' in their files list. + + This property filters the study's dataset contents, returning only those parts of the dataset + where the 'subject' file is present, which is typically critical for study-specific information. 
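To show how the validated study object is traversed in practice, a minimal sketch follows; the dataset path is hypothetical, and the import path is assumed from the pvobj package layout after the pvdataset-to-pvstudy rename:

    from brkraw.api.pvobj import PvStudy   # assumed export, per the renamed module

    study = PvStudy('path/to/raw_bruker_study')   # a directory or a zip archive
    for scan_id in study.avail:                   # sorted scan IDs
        scan = study.get_scan(scan_id)
        for reco_id in scan.avail:                # sorted reconstruction IDs
            print(scan_id, reco_id, scan.get_reco(reco_id).path)
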
+ + Returns: + dict: The dictionary of contents that includes 'subject' among its files. + """ for _, contents in super().contents.items(): if 'subject' in contents['files']: return contents def _clear_contents(self, to_be_removed): + """Clears specified contents from the dataset's memory structure. + + This method attempts to remove paths listed in `to_be_removed` from the dataset's content dictionary. + If a path cannot be found (i.e., it's already been removed or never existed), it logs the path to `_dummy` + for further debugging or inspection. + + Args: + to_be_removed (list): A list of paths to be removed from the dataset's contents. + + Returns: + None + + Notes: + The `_dummy` list can be used to track removal errors or inconsistencies in the dataset's path management. + """ for path in to_be_removed: try: del self._contents[path] @@ -149,35 +154,40 @@ def _clear_contents(self, to_be_removed): @property def path(self): - """Gets the path of the object. + """Returns the filesystem path of the study dataset. Returns: - str: The path of the object. + str: The path to the dataset. """ return self._path @property def avail(self): - """A property representing the available scans. + """Provides a list of available scan IDs within the dataset. Returns: - list: A list of available scans. + list: A sorted list of available scan IDs. """ return sorted(list(self._scans)) def get_scan(self, scan_id: int): - """Get a specific scan object by ID. + """Retrieves the scan object associated with the specified scan ID. Args: - scan_id (int): The ID of the scan object to retrieve. + scan_id (int): The unique identifier for the scan. Returns: - object: The specified scan object. + PvScan: The scan object associated with the given ID. Raises: - KeyError: If the specified scan ID does not exist. + KeyError: If there is no scan associated with the provided ID. """ return self._scans[scan_id] def __dir__(self): + """Customizes the directory listing to include specific attributes and methods. + + Returns: + list: A list of attribute names and methods available in this object. 
+ """ return super().__dir__() + ['path', 'avail', 'get_scan'] From 48e6ecce3d824403a5885a3b5d8c525f35e67d75 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 5 May 2024 10:40:53 -0400 Subject: [PATCH 09/16] feat(brkraw v0.4.0) save intermediate states - work in progress, unstable code --- .flake8 | 5 +- .github/workflows/test.yml | 2 +- .gitignore | 9 +- brkraw/__init__.py | 11 +- brkraw/api/__init__.py | 9 +- brkraw/api/config/__init__.py | 3 - brkraw/api/config/fetcher/__init__.py | 14 - brkraw/api/config/fetcher/base.py | 187 ---------- brkraw/api/config/fetcher/snippets.py | 184 ---------- brkraw/api/config/manager.py | 149 -------- brkraw/api/config/snippet/__init__.py | 6 - brkraw/api/config/snippet/app.py | 8 - brkraw/api/config/snippet/base.py | 13 - brkraw/api/config/snippet/bids.py | 8 - brkraw/api/config/snippet/loader.py | 84 ----- brkraw/api/config/snippet/plugin.py | 229 ------------- brkraw/api/config/snippet/preset.py | 8 - brkraw/api/data/study.py | 22 +- brkraw/api/data/study.yaml | 5 + brkraw/api/helper/recipe.py | 112 ------ brkraw/api/pvobj/base.py | 19 +- brkraw/api/pvobj/types.py | 24 ++ brkraw/app/backup/__init__.py | 41 +++ brkraw/app/backup/cache.py | 175 ++++++++++ brkraw/app/backup/handler.py | 477 ++++++++++++++++++++++++++ brkraw/app/tonifti/__init__.py | 130 +++---- brkraw/app/tonifti/base.py | 168 +++++---- brkraw/app/tonifti/header.py | 36 +- brkraw/app/tonifti/plugin.py | 54 +++ brkraw/app/tonifti/scan.py | 11 +- brkraw/app/tonifti/study.py | 37 +- brkraw/app/tonifti/types.py | 18 + brkraw/app/viewer/__init__.py | 0 brkraw/app/viewer/config.py | 17 + brkraw/app/viewer/main_win.py | 215 ++++++++++++ brkraw/app/viewer/previewer.py | 225 ++++++++++++ brkraw/app/viewer/scan_info.py | 72 ++++ brkraw/app/viewer/scan_list.py | 73 ++++ brkraw/app/viewer/subj_info.py | 128 +++++++ brkraw/{api/config => }/config.yaml | 22 +- mypy.ini | 3 + pyproject.toml | 31 +- tests/01_api_pvobj_test.py | 19 + tests/conftest.py | 40 +++ 44 files changed, 1874 insertions(+), 1229 deletions(-) delete mode 100644 brkraw/api/config/__init__.py delete mode 100644 brkraw/api/config/fetcher/__init__.py delete mode 100644 brkraw/api/config/fetcher/base.py delete mode 100644 brkraw/api/config/fetcher/snippets.py delete mode 100644 brkraw/api/config/manager.py delete mode 100644 brkraw/api/config/snippet/__init__.py delete mode 100644 brkraw/api/config/snippet/app.py delete mode 100644 brkraw/api/config/snippet/base.py delete mode 100644 brkraw/api/config/snippet/bids.py delete mode 100644 brkraw/api/config/snippet/loader.py delete mode 100644 brkraw/api/config/snippet/plugin.py delete mode 100644 brkraw/api/config/snippet/preset.py delete mode 100644 brkraw/api/helper/recipe.py create mode 100644 brkraw/api/pvobj/types.py create mode 100644 brkraw/app/backup/__init__.py create mode 100644 brkraw/app/backup/cache.py create mode 100644 brkraw/app/backup/handler.py create mode 100644 brkraw/app/tonifti/plugin.py create mode 100644 brkraw/app/tonifti/types.py create mode 100644 brkraw/app/viewer/__init__.py create mode 100644 brkraw/app/viewer/config.py create mode 100644 brkraw/app/viewer/main_win.py create mode 100644 brkraw/app/viewer/previewer.py create mode 100644 brkraw/app/viewer/scan_info.py create mode 100644 brkraw/app/viewer/scan_list.py create mode 100644 brkraw/app/viewer/subj_info.py rename brkraw/{api/config => }/config.yaml (52%) create mode 100644 mypy.ini create mode 100644 tests/01_api_pvobj_test.py create mode 100644 tests/conftest.py diff --git a/.flake8 b/.flake8 index 
43e2629..f93bc67 100644 --- a/.flake8 +++ b/.flake8 @@ -7,4 +7,7 @@ exclude = env, venv, max-line-length = 127 -max-complexity=10 \ No newline at end of file +max-complexity = 10 +ignore = W291, W293 +docstring-convention = google +mypy-config = ./mypy.ini \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index dc91c2c..b6b719c 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -81,7 +81,7 @@ jobs: run: | python -m pip install --upgrade pip pip install .[dev] - pip install .[SimpleITK] + pip install .[legacy] - name: Install tutorial run: make tests/tutorials diff --git a/.gitignore b/.gitignore index e8825c8..2c9f9c3 100644 --- a/.gitignore +++ b/.gitignore @@ -11,7 +11,10 @@ build *.egg-info *.egg-info/* .DS_Store +.mypy_cache +.pytest_cache + +tests/.brkraw +tests/_*.ipynb tests/tutorials -_test*.py -_*.ipynb -_*.log \ No newline at end of file +tests/_datasets \ No newline at end of file diff --git a/brkraw/__init__.py b/brkraw/__init__.py index 82a46a7..d2fcca9 100644 --- a/brkraw/__init__.py +++ b/brkraw/__init__.py @@ -1,10 +1,13 @@ from .lib import * -from .api import ConfigManager +from xnippy import Xnippy as ConfigManager -config = ConfigManager() -__version__ = '0.4.00' -__all__ = ['BrukerLoader', '__version__', 'config'] +__version__ = '0.4.0' +config = ConfigManager(package_name=__package__, + package_version=__version__, + package__file__=__file__, + config_filename='config.yaml') +__all__ = ['BrukerLoader', '__version__', 'config'] def load(path): return BrukerLoader(path) diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py index 2c9217a..a6bbfb6 100755 --- a/brkraw/api/__init__.py +++ b/brkraw/api/__init__.py @@ -1,5 +1,6 @@ -from .data import Study -from .config import Manager as ConfigManager -from .config.snippet.plugin import PlugIn as PlugInSnippet +# from .data import Study +from xnippy.snippet.plugin import PlugIn as PlugInSnippet +from xnippy.formatter import PathFormatter -__all__ = ['Study', 'ConfigManager', 'PlugInSnippet'] \ No newline at end of file + +__all__ = ['Study', 'PlugInSnippet', 'PathFormatter'] \ No newline at end of file diff --git a/brkraw/api/config/__init__.py b/brkraw/api/config/__init__.py deleted file mode 100644 index 6698a67..0000000 --- a/brkraw/api/config/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .manager import Manager - -__all__ = ['Manager'] \ No newline at end of file diff --git a/brkraw/api/config/fetcher/__init__.py b/brkraw/api/config/fetcher/__init__.py deleted file mode 100644 index eac06c5..0000000 --- a/brkraw/api/config/fetcher/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Initialization for the fetcher module. - -This module consolidates various fetching functionalities and exposes the Snippets class -for fetching and managing snippets from local and remote sources. - -Exposes: - SnippetsFetcher: A class derived from the Snippets module, tailored to handle the fetching, - storage, and synchronization of code snippets or configurations from - designated sources. -""" - -from .snippets import Snippets as SnippetsFetcher - -__all__ = ['SnippetsFetcher'] \ No newline at end of file diff --git a/brkraw/api/config/fetcher/base.py b/brkraw/api/config/fetcher/base.py deleted file mode 100644 index 9fdfd27..0000000 --- a/brkraw/api/config/fetcher/base.py +++ /dev/null @@ -1,187 +0,0 @@ -"""Provides a base Fetcher class for accessing and manipulating content from remote repositories. 
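With the in-tree fetcher and config manager below removed in favor of xnippy, the public entry point wired in brkraw/__init__.py above stays the same; a minimal sketch (the study path is hypothetical):

    import brkraw

    print(brkraw.__version__)                  # '0.4.0'
    loader = brkraw.load('path/to/raw_study')  # legacy BrukerLoader, unchanged
    # brkraw.config is now the Xnippy instance constructed in __init__.py
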
- -This module is designed to facilitate the retrieval of repository data, specifically from GitHub, -by providing methods to authenticate, fetch, and traverse directories. It integrates direct -API requests to handle repository contents and provides utility functions for downloading files -and walking through repository directories recursively. - -Classes: - Fetcher: A base class for fetching content from remote repositories with GitHub API integration. -""" - -from __future__ import annotations -import re -import warnings -import requests -from brkraw.api.util.package import PathResolver -from typing import TYPE_CHECKING -if TYPE_CHECKING: - from typing import Optional, Union - from typing import List, Tuple, Generator - - -class Fetcher(PathResolver): - """Base class for fetching remote content with methods to authenticate and navigate repositories. - - The Fetcher class extends the functionality of PathResolver to include methods that handle - the authentication and retrieval of data from remote GitHub repositories. It provides - utilities to walk through repository directories, fetch file and directory contents, - and download files as needed. - - Attributes: - _auth (Union[List[Tuple[str, str]], Tuple[str, str]]): Authentication credentials for the repository. - repos (dict): Configuration for the repositories to be accessed. - """ - _auth: Union[List[Tuple[str, str]], Tuple[str, str]] - repos: dict - - @staticmethod - def is_connected(): - """Check if there is an internet connection available by pinging a known URL. - - Returns: - bool: True if the connection is successful, False otherwise. - """ - try: - Fetcher._fetch_from_url('https://api.github.com') - except (requests.ConnectTimeout, requests.ConnectionError, requests.RequestException): - return False - return True - - def _set_auth(self): - """Set up authentication credentials for accessing configured repositories. - - Extracts and sets authentication details for each repository from the provided configurations. - """ - if isinstance(self.repos, list): - self._auth = [self._fetch_auth(repo) for repo in self.repos] - - @staticmethod - def _fetch_auth(repo_dict: dict): - """Fetch authentication credentials from a repository configuration. - - Args: - repo_dict (dict): Repository configuration containing 'auth' fields. - - Returns: - Optional[Tuple[str, str]]: A tuple containing username and token if both are present, otherwise None. - """ - if 'auth' in repo_dict: - username = repo_dict['auth']['username'] - token = repo_dict['auth']['token'] - return (username, token) if username and token else None - return None - - @staticmethod - def _walk_github_repo(repo_url: dict, path: Optional['str'] = None, auth: Tuple[str, str] = None): - """Recursively walk through directories in a GitHub repository to fetch directory and file structure. - - Args: - repo_url (dict): URL of the GitHub repository. - path (Optional[str]): Specific path in the repository to start the walk. - auth (Tuple[str, str]): Authentication credentials for accessing the repository. - - Yields: - dict: A dictionary containing 'path', 'dirs', and 'files' with their respective URLs. - """ - base_url = Fetcher._decode_github_repo(repo_url=repo_url, path=path) - return Fetcher._walk_dir(url=base_url, auth=auth) - - @staticmethod - def _walk_dir(url, path='', auth: Tuple[str, str] = None): - """Walk through a specific directory in a repository. - - Args: - url (str): URL of the directory to walk through. - path (str): Path relative to the repository root. 
- auth (Tuple[str, str]): Authentication credentials for accessing the repository. - - Yields: - dict: A dictionary containing the path, directories, and files within the directory. - """ - contents = Fetcher._fetch_from_url(url=url, auth=auth).json() - dirs, files = Fetcher._fetch_directory_contents(contents) - yield {'path':path, - 'dirs':{d['name']:d['url'] for d in dirs}, - 'files':{f['name']:f['download_url'] for f in files}} - - for dir in dirs: - new_path = f"{path}/{dir['name']}" if path else dir['name'] - new_url = dir['url'] - yield from Fetcher._walk_dir(url=new_url, path=new_path, auth=auth) - - @staticmethod - def _fetch_directory_contents(contents): - """Categorize contents of a directory into subdirectories and files. - - Args: - contents (list): List of contents from a directory. - - Returns: - tuple: A tuple containing lists of directories and files. - """ - dirs, files = [], [] - for i, item in enumerate(contents): - if item['type'] == 'dir': - dirs.append(item) - elif item['type'] == 'file': - files.append(item) - return dirs, files - - @staticmethod - def _decode_github_repo(repo_url: dict, path: Optional['str'] = None): - """Decode a GitHub repository URL to construct an API endpoint URL. - - Args: - repo_url (dict): The GitHub repository URL. - path (Optional[str]): An optional path within the repository. - - Returns: - str: A constructed API endpoint URL based on the repository details. - """ - ptrn_github = r'https://(?:[^/]+\.)?github\.com/(?P[^/]+)/(?P[^/.]+)(?:\.git])?' - if matched := re.match(ptrn_github, repo_url): - owner = matched['owner'] - repo = matched['repo'] - return f"https://api.github.com/repos/{owner}/{repo}/contents/{path}" if path \ - else f"https://api.github.com/repos/{owner}/{repo}/contents" - - @staticmethod - def _fetch_from_url(url: str, auth: Tuple[str, str] = None) -> Optional[requests.Response]: - """Fetch data from a given URL using optional authentication. - - Args: - url (str): The URL from which to fetch data. - auth (Tuple[str, str]): Optional authentication credentials. - - Returns: - Optional[requests.Response]: The response object if successful, otherwise None. - """ - response = requests.get(url, auth=auth) - if response.status_code == 200: - return response - else: - warnings.warn(f"Failed to retrieve contents: {response.status_code}", UserWarning) - return None - - @staticmethod - def _download_buffer(url: dict, - chunk_size: int = 8192, - auth: Tuple[str, str] = None) -> Union[Generator, bool]: - """Download file content from a URL in buffered chunks. - - Args: - url (dict): The URL of the file to download. - chunk_size (int): The size of each chunk in bytes. - auth (Tuple[str, str]): Optional authentication credentials. - - Returns: - Union[Generator, bool]: A generator yielding file chunks if successful, False on error. - """ - try: - response = requests.get(url, stream=True, auth=auth) - response.raise_for_status() - return response.iter_content(chunk_size=chunk_size) - except requests.RequestException as e: - warnings.warn(f'Error downloading the file: {e}') - return False diff --git a/brkraw/api/config/fetcher/snippets.py b/brkraw/api/config/fetcher/snippets.py deleted file mode 100644 index 128e029..0000000 --- a/brkraw/api/config/fetcher/snippets.py +++ /dev/null @@ -1,184 +0,0 @@ -"""Provides functionality to manage and synchronize snippets across local and remote sources. 
- -This module defines a `Snippets` class which aggregates snippets from various sources, -handles their synchronization, and ensures that the snippets are up-to-date according to -user-specified modes (plugin, preset, bids, app). It supports operations on snippets -fetched from both local file systems and remote repositories, offering features to check -connectivity, fetch content, and validate snippet integrity. - -Classes: - Snippets: Manages the aggregation and synchronization of snippets based on specified modes. -""" - -from __future__ import annotations -import os -import warnings -from pathlib import Path -from .base import Fetcher -from brkraw.api.config.snippet import PlugInSnippet -from brkraw.api.config.snippet import BIDSSnippet -from brkraw.api.config.snippet import PresetSnippet -from brkraw.api.config.snippet import AppSnippet -from typing import TYPE_CHECKING -if TYPE_CHECKING: - from typing import List - from typing import Tuple, Optional, Literal - from brkraw.api.config.snippet.base import Snippet - - -class Snippets(Fetcher): - """Manages the aggregation of snippets from various sources based on the specified mode. - - This class integrates local and remote snippet sources, handling their fetching, storing, - and updating based on connectivity and cache settings. - """ - path: Optional[Path] - mode: Literal['plugin', 'preset', 'bids', 'app'] - is_cache: bool - _fetched: bool = False - _template: List[Snippet] = [] - _remote_snippets: List[Snippet] = [] - _local_snippets: List[Snippet] = [] - _template_snippets: List[Snippet] = [] - - def __init__(self, - repos: dict, - mode: Literal['plugin', 'preset', 'bids', 'app'], - path: Tuple[Optional['Path'], 'bool'] = (None, False) - ) -> None: - """Initializes the Snippets object with specified repository configurations and operational mode. - - Args: - repos (dict): A dictionary containing repository configurations. - mode (Literal['plugin', 'preset', 'bids', 'app']): The operational mode determining the type of snippets to manage. - path (Tuple[Optional[Path], bool], optional): A tuple containing the path to local storage and a boolean indicating cache usage. - """ - self.repos = repos - self.mode = mode - self.path = self._resolve(path[0]) - self.is_cache = path[1] - self._set_auth() - self._fetch_local_contents() - self._template = [c[mode]['template'] for c in repos if 'template' in c[mode]] - - def _fetch_local_contents(self) -> Optional[list]: - """Fetches snippets from local storage based on the current mode and path settings. - - Gathers contents from the specified directory and converts them into snippets. This operation - is skipped if caching is enabled. - - Returns: - Optional[list]: Returns None if caching is enabled, otherwise returns a list of fetched local contents. - """ - if self.is_cache: - return None - if self.mode in ['plugin', 'preset', 'bids']: - contents = [] - for path, dirs, files in os.walk(self.path): - child = {'path':self._resolve(path), - 'dirs':{d:self._resolve(path) / d for d in dirs}, - 'files':{f:self._resolve(path) / f for f in files}} - contents.append(child) - self._convert_contents_to_snippets([contents], remote=False) - - def _fetch_remote_contents(self) -> None: - """Fetches snippets from remote repositories if connected and not previously fetched. - - Retrieves snippet data from remote sources as specified by the repository configuration - and converts them into snippet objects. Updates the fetched status upon completion. 
- """ - if self.repos and self.mode in ['plugin', 'preset', 'bids']: - contents = [self._walk_github_repo(repo_url=repo['url'], - path=repo[self.mode]['path'], - auth=self._auth[i]) for i, repo in enumerate(self.repos)] - self._convert_contents_to_snippets(contents=contents, remote=True) - self._fetched = True - - def _convert_contents_to_snippets(self, contents: list, remote: bool = False) -> None: - """Converts fetched contents from either local or remote sources into snippet objects. - - Iterates over fetched contents, creating snippet objects which are then stored appropriately - based on their validation status and whether they match predefined templates. - - Args: - contents (list): List of contents fetched from either local or remote sources. - remote (bool, optional): Flag indicating whether the contents are from remote sources. - """ - for repo_id, contents in enumerate(contents): - for c in contents: - if remote: - snippet = self._snippet(contents=c, auth=self._auth[repo_id], remote=remote) - self._store_remote_snippet(repo_id=repo_id, snippet=snippet) - else: - snippet = self._snippet(contents=c, remote=remote) - if snippet.is_valid and \ - snippet.name not in [s.name for s in self._local_snippets]: - self._local_snippets.append(snippet) - - def _store_remote_snippet(self, repo_id: int, snippet: Snippet): - """Stores validated remote snippets into the appropriate lists based on template matching. - - Checks if the snippet is valid and if it matches a template or not. Based on this, - the snippet is added to the respective list (template snippets or general remote snippets). - - Args: - repo_id (int): The repository ID corresponding to the snippet source. - snippet (Snippet): The snippet object to be stored. - """ - if not snippet.is_valid: - return None - if self._is_template(repo_id, snippet) and \ - snippet.name not in [s.name for s in self._template_snippets]: - self._template_snippets.append(snippet) - elif not self._is_template(repo_id, snippet) and \ - snippet.name not in [s.name for s in self._remote_snippets]: - self._remote_snippets.append(snippet) - - @property - def _snippet(self): - """Determines the snippet class based on the operational mode. - - Returns: - Type[Snippet]: Returns the class type corresponding to the operational mode (Plugin, Preset, BIDS, App). - """ - if self.mode == 'plugin': - return PlugInSnippet - elif self.mode == 'preset': - return PresetSnippet - elif self.mode == 'bids': - return BIDSSnippet - else: - return AppSnippet - - @property - def remote(self): - """Access the remote snippets if available. Fetches the snippets from a remote source if not already fetched - and if a network connection is available. - - Returns: - Any: The remote snippets if available and connected, otherwise None. - - Raises: - Warning: If the connection to fetch remote snippets fails. - """ - if self._remote_snippets: - return self._remote_snippets - else: - if self.is_connected(): - self._fetch_remote_contents() - return self._remote_snippets - else: - warnings.warn("Connection failed. Please check your network settings.") - return None - - def _is_template(self, repo_id: int, snippet: Snippet) -> bool: - """Test given snippet is template. 
This internal method used to exclude template snippets from avail.""" - return any(snippet.name == t for t in self._template[repo_id]) - - @property - def local(self): - return self._local_snippets - - @property - def is_up_to_date(self): - return self._fetched \ No newline at end of file diff --git a/brkraw/api/config/manager.py b/brkraw/api/config/manager.py deleted file mode 100644 index 3c94050..0000000 --- a/brkraw/api/config/manager.py +++ /dev/null @@ -1,149 +0,0 @@ -"""Manager module for configuring, loading, or creating configuration files. - -This module facilitates the management of configuration settings within the application, -allowing configurations to be handled internally without file creation unless specifically -requested by the user through CLI to create them in the home folder. -""" - -from __future__ import annotations -import yaml -import warnings -from pathlib import Path -from .fetcher import SnippetsFetcher -from typing import TYPE_CHECKING -if TYPE_CHECKING: - from typing import Tuple, Literal, Union, Optional - - -class Manager: - """Manages the configuration settings for the application. - - This class ensures the existence of the configuration directory, loads or creates the configuration file, - sets configuration values, and retrieves configuration values. It operates both globally and locally - depending on the user's choice and the operational context. - """ - config: dict = {} - - def __init__(self, tmpdir: Optional[Path] = None) -> None: - """Initializes the configuration manager. - - This constructor sets up paths for the home directory, global and local configuration directories, - and configuration file. It ensures the configuration directory exists and loads or creates the - configuration based on its presence. - - Args: - tmpdir (Optional[Path]): Temporary directory for storing configurations, defaults to the home directory. - """ - self._home_dir = Path.home() - self._default_dir = Path(__file__).absolute().parent - self._local_dir = Path.cwd() / '.brkraw' - self._global_dir = self._home_dir / '.brkraw' - self._fname = 'config.yaml' - self._tmpdir = tmpdir or self._home_dir / '.tmp' - self.load() - - @property - def created(self) -> Union[Literal['global', 'local'], list[str], bool]: - """"Checks and returns the location where the configuration folder was created. - - Returns: - Union[Literal['global', 'local'], list[str], bool]: Returns 'global' or 'local' if the config folder was created at that level, - a list of locations if multiple exist, or False if no config folder is created. - """ - created = [(f / self._fname).exists() for f in [self._global_dir, self._local_dir]] - checked = [loc for i, loc in enumerate(['global', 'local']) if created[i]] - checked = checked.pop() if len(checked) == 1 else checked - return checked or False - - @property - def config_dir(self) -> 'Path': - """Determines and returns the appropriate configuration directory based on the existence and location of the config file. - - Returns: - Path: Path to the configuration directory based on its existence and scope (global or local). 
- """ - if isinstance(self.created, list): - return self._local_dir - elif isinstance(self.created, str): - return self._local_dir if self.created == 'local' else self._global_dir - return self._default_dir - - def load(self) -> None: - """Loads an existing configuration file or creates a new one if it does not exist, filling the 'config' dictionary with settings.""" - with open(self.config_dir / self._fname) as f: - self.config = yaml.safe_load(f) - - def create(self, target: Literal['local', 'global'] = 'local', - force: bool = False) -> bool: - """Creates a configuration file at the specified location. - - Args: - target (Literal['local', 'global']): Target directory for creating the configuration file, defaults to 'local'. - force (bool): If True, overwrites the existing configuration file, defaults to False. - - Returns: - bool: True if the file was created successfully, False otherwise. - """ - if not self.config: - self.load() - config_dir = self._local_dir if target == 'local' else self._global_dir - config_dir.mkdir(exist_ok=True) - config_file = config_dir / self._fname - if config_file.exists(): - if not force: - warnings.warn("Config file exists, please use 'force' option if you want overwrite.", - UserWarning) - return False - with open(config_dir / self._fname, 'w') as f: - yaml.safe_dump(self.config, f, sort_keys=False) - - def get_fetcher(self, mode: Literal['plugin', 'preset', 'bids', 'app']) -> SnippetsFetcher: - """Returns the appropriate fetcher based on the mode. - - Args: - mode (Literal['plugin', 'preset', 'bids', 'app']): The mode determining which type of fetcher to return. - - Returns: - SnippetsFetcher: An instance of SnippetsFetcher configured for the specified mode. - """ - if mode in ['plugin', 'preset', 'bids']: - return self._get_snippet_fetcher(mode) - else: - return self._get_app_fetcher() - - def _get_snippet_fetcher(self, mode: Literal['plugin', 'preset', 'bids']) -> 'SnippetsFetcher': - """Retrieves a configured SnippetsFetcher for the specified mode to handle fetching of snippets. - - Args: - mode (Literal['plugin', 'preset', 'bids']): The specific category of snippets to fetch. - - Returns: - SnippetsFetcher: A fetcher configured for fetching snippets of the specified type. - """ - return SnippetsFetcher(repos=self.config['snippets']['repo'], - mode=mode, - path=self._check_dir(mode)) - - def _get_app_fetcher(self) -> 'SnippetsFetcher': - """Retrieves a SnippetsFetcher for application handling. - - Returns: - SnippetsFetcher: A fetcher configured to handle application-specific tasks. - """ - return SnippetsFetcher(repos=self.config['app'], - mode='app') - - def _check_dir(self, type_: Literal['plugin', 'preset', 'bids']) -> Tuple['Path', bool]: - """Checks and prepares the directory for the specified snippet type, ensuring it exists. - - Args: - type_ (Literal['plugin', 'preset', 'bids']): The type of snippet for which the directory is checked. - - Returns: - Tuple[Path, bool]: A tuple containing the path to the directory and a cache flag indicating - if caching is necessary (True if so). 
- """ - path, cache = (self.config_dir / type_, False) if self.created else (self._tmpdir, True) - if not path.exists(): - path.mkdir() - return path, cache \ No newline at end of file diff --git a/brkraw/api/config/snippet/__init__.py b/brkraw/api/config/snippet/__init__.py deleted file mode 100644 index 350fa09..0000000 --- a/brkraw/api/config/snippet/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .plugin import PlugIn as PlugInSnippet -from .preset import Preset as PresetSnippet -from .bids import BIDS as BIDSSnippet -from .app import App as AppSnippet - -__all__ = ['PlugInSnippet', 'PresetSnippet', 'BIDSSnippet', 'AppSnippet'] \ No newline at end of file diff --git a/brkraw/api/config/snippet/app.py b/brkraw/api/config/snippet/app.py deleted file mode 100644 index c07126a..0000000 --- a/brkraw/api/config/snippet/app.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Snippet for App configuration""" - -from .base import Snippet - - -class App(Snippet): - def __init__(self): - raise NotImplementedError diff --git a/brkraw/api/config/snippet/base.py b/brkraw/api/config/snippet/base.py deleted file mode 100644 index 0874490..0000000 --- a/brkraw/api/config/snippet/base.py +++ /dev/null @@ -1,13 +0,0 @@ -"""BaseSnippet for provide platform for developing Snippet to configure and/or interface with other apps in BrkRaw ecosystem. -The current base is minimal structure as currently only PluginSnippet is available, will be expended to contains shared -method and attributes for Snippet classes -""" - -from brkraw.api.config.fetcher.base import Fetcher - - -class Snippet(Fetcher): - name: str - version: str - type: str - is_valid: bool diff --git a/brkraw/api/config/snippet/bids.py b/brkraw/api/config/snippet/bids.py deleted file mode 100644 index f4a79d0..0000000 --- a/brkraw/api/config/snippet/bids.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Snippet for BIDS converter""" - -from .base import Snippet - - -class BIDS(Snippet): - def __init__(self): - raise NotImplementedError \ No newline at end of file diff --git a/brkraw/api/config/snippet/loader.py b/brkraw/api/config/snippet/loader.py deleted file mode 100644 index b80999b..0000000 --- a/brkraw/api/config/snippet/loader.py +++ /dev/null @@ -1,84 +0,0 @@ -"""This module implements a ModuleLoader class that allows importing Python modules from either -a bytes object or a file path. - -It is designed to be used within PlugIn Snippets to dynamically load modules without requiring them to be -pre-installed or located in a standard file system path. -""" - -from __future__ import annotations -import sys -import importlib -from importlib.machinery import ModuleSpec -from importlib.abc import SourceLoader -from pathlib import Path -from typing import TYPE_CHECKING -if TYPE_CHECKING: - from typing import Union, Optional - - -class ModuleLoader(SourceLoader): - """A custom loader that imports a Python module from a bytes object or from a filepath. - - This loader supports dynamic execution of Python code, which can be especially useful in environments - where plugins or modules need to be loaded from non-standard locations or directly from memory. - - Attributes: - data (bytes, optional): The bytes object containing the source code of the module. - filepath (Path, optional): The file path to the module if it's not loaded from bytes. - """ - def __init__(self, module: Union[Path, bytes]): - """Initializes the ModuleLoader with either a path to the module or its bytes content. - - Args: - module (Union[Path, bytes]): The source of the module, either as a path or bytes. 
- """ - if isinstance(module, bytes): - self.data, self.filepath = module, None - else: - self.data, self.filepath = None, module - - def get_data(self, path: Optional[Path]): - """Fetches the module's data from bytes or a file. - - Args: - path (Path, optional): The path from which to load the module data if it's not already provided as bytes. - - Returns: - bytes: The raw data of the module. - """ - if self.data: - return self.data - elif path and Path(path).is_file(): - with open(path, 'rb') as file: - return file.read() - else: - raise FileNotFoundError(f"No such file: {path}") - - def get_filename(self, fullname: Optional[str] = None): - """Retrieves the filename of the module being loaded. - - Args: - fullname (str, optional): The full name of the module. - - Returns: - str: The filepath if it's defined, otherwise a dummy string for byte-loaded modules. - """ - return str(self.filepath) if self.filepath else "" - - def get_module(self, name: str) -> ModuleSpec: - """Creates and returns a module object from the provided data. - - This method constructs a module using the spec provided by this loader. - - Args: - name (str): The name of the module. - - Returns: - ModuleSpec: The module object loaded and ready for use. - """ - spec = ModuleSpec(name=name, loader=self, origin=self.get_filename()) - module = importlib.util.module_from_spec(spec) - self.exec_module(module) - sys.modules[name] = module - return module - \ No newline at end of file diff --git a/brkraw/api/config/snippet/plugin.py b/brkraw/api/config/snippet/plugin.py deleted file mode 100644 index 3d41f82..0000000 --- a/brkraw/api/config/snippet/plugin.py +++ /dev/null @@ -1,229 +0,0 @@ -"""Provides a PlugInSnippet class that allows for plugin source code or code loaded in memory -to be imported as a Python module. This extends the functionality of the brkraw module at the -application level. - -This class facilitates the quick testing of code without the need for environment setup for plugin downloads. - -Changes: - 2024.5.1: Initial design and implementation of the PlugIn Snippet architecture. Initially tested for the tonifti app. - TODO: The PlugIn module will be a standard method to extend functionality across the entire apps. - -Author: Sung-Ho Lee (shlee@unc.edu) -""" - -from __future__ import annotations -import sys -import re -import yaml -import warnings -import subprocess as subproc -from pathlib import Path -from tqdm import tqdm -from .base import Snippet -from .loader import ModuleLoader -from typing import TYPE_CHECKING -if TYPE_CHECKING: - from typing import Tuple, Dict, Optional, Union - - -class PlugIn(Snippet): - """Handles the inspection and management of plugins, either locally or from remote sources. - - This class supports dynamic loading of plugins into memory for immediate use without the need for disk storage, - facilitating rapid development and testing of plugin functionalities. - """ - _remote: bool - _module_loaded: bool - _dependencies_tested: bool = False - _auth: Tuple[str, str] - _data: Dict = {} - _contents: Dict - - def __init__(self, - contents: dict, - auth: Optional[Tuple[str, str]] = None, - remote: bool = False): - """Initializes the plugin with specified contents, authentication for remote access, and remote status. - - Args: - contents (dict): Contains keys of path, dirs, and files, similar to os.walk but structured as a dictionary. - Each directory and file is also mapped as a key (filename) to a value (path or download_url). 
- auth (Tuple[str, str], optional): Credentials for using the GitHub API if needed. - remote (bool): True if the plugin is loaded remotely, False otherwise. - """ - self._auth = auth - self._contents = contents - self._remote = remote - self._content_parser() - - def set(self, skip_dependency_check: bool = False, *args, **kwargs): - """Sets the plugin's parameters and ensures dependencies are resolved and the module is loaded. - - This method acts as a setup routine by testing dependencies, downloading necessary files, - and dynamically importing the module and call module with given input arguments. - - Args: - *args: Variable length argument list. - **kwargs: Arbitrary keyword arguments. - - Returns: - The result of calling the imported module with provided arguments. - """ - if not self._module_loaded: - self.download() - if not self._dependencies_tested and not skip_dependency_check: - self.resolve_dependencies() - return self._imported_module(*args, **kwargs) - - def resolve_dependencies(self): - """Checks and installs any missing dependencies specified in the plugin's manifest file.""" - ptrn = r'(\w+)\s*(>=|<=|==|!=|>|<)\s*([0-9]+(?:\.[0-9]+)*)?' - deps = self._manifest['dependencies'] - print(f"++ Resolving python module dependencies...\n -> {deps}") - for module in tqdm(deps, desc=' -Dependencies', ncols=80): - if matched := re.match(ptrn, module): - self._status = None - self._pip_install(matched) - self._dependencies_tested = True - - def _pip_install(self, matched): - """Executes the pip install command for the matched dependency. - - Args: - matched (re.Match): A match object containing the module and version information. - - This method handles the pip installation process, directing output and errors appropriately. - """ - m, r, v = matched.groups() - cmd = [sys.executable, "-m", "pip", "install", f"{m}{r or ''}{v or ''}"] - displayed = 0 - with subproc.Popen(cmd, stdout=subproc.PIPE, stderr=subproc.PIPE, - text=True, bufsize=1, universal_newlines=True) as proc: - for l in proc.stdout: - if 'satisfied' in l.lower(): - if not displayed: - print(f" + Required already satisfied: {m}") - displayed += 1 - elif 'collecting' in l.lower(): - if not displayed: - print(f" + Installing '{m}' to resolve dependencies.") - displayed += 1 - proc.wait() - if proc.returncode != 0: - warnings.warn(f"'Errors during resolving dependencies': {''.join(proc.stderr)}") - - def download(self, dest: Optional[Path] = None, force: bool = False): - """Downloads the plugin to a specified destination or loads it directly into memory if no destination is provided. - This method also checks if the file already exists at the destination and optionally overwrites it based on the 'force' parameter. - - Args: - dest (Path, optional): The file system destination where the plugin files will be saved. - If None, files are loaded into memory. - force (bool, optional): If True, existing files at the destination will be overwritten. - Defaults to False. 
- """ - if not self._remote: - warnings.warn("Attempt to download failed: The plugin is already available " - "locally and cannot be downloaded again.", UserWarning) - return False - print(f"\n++ Downloading remote module to '{dest or 'memory'}'.") - files = self._contents['files'] if dest else self._get_module_files() - for filename, download_url in tqdm(files.items(), desc=' -Files', ncols=80): - if dest: - plugin_path = (dest / self.name) - plugin_path.mkdir(exist_ok=True) - plugin_file = plugin_path / filename - if plugin_file.exists() and not force: - warnings.warn(f"Warning: File '{filename}' already exists. Skipping download. Use 'force=True' to overwrite.", - UserWarning) - continue # Skip the download if file exists and force is False - with open(plugin_file, 'wb') as f: - for chunk in self._download_buffer(download_url, auth=self._auth): - f.write(chunk) - else: - # When downloading to memory - self._data[filename] = b''.join(self._download_buffer(download_url, auth=self._auth)) - self._module_loaded = True # Mark the module as loaded - - - def _get_module_files(self): - return {f:url for f, url in self._contents['files'].items() if f.endswith('.py')} - - def _content_parser(self): - """Parses the contents of the plugin based on its current state (local or remote). - - This method sets the plugin's parameters and determines its validity based on the availability - and correctness of the required data. - """ - if len(self._contents['files']) == 0: - self.is_valid = False - return None - self._parse_files() - try: - self._set_params() - except KeyError: - self.is_valid = False - return None - - def _set_params(self): - self.name = self._manifest['name'] - self.version = self._manifest['version'] - self.type = self._manifest['subtype'] - self.is_valid = True - self._module_loaded = False if self._remote else True - - def _parse_files(self): - """Processes the contents, loading the manifest if necessary.""" - for filename, file_loc in self._contents['files'].items(): - if filename.lower() == 'manifest.yaml': - self._load_manifest(file_loc) - - def _parse_remote(self): - """Processes the contents if the plugin is in a remote state, loading the manifest if necessary.""" - for filename, download_url in self._contents['files'].items(): - if filename.lower() == 'manifest.yaml': - self._load_manifest(download_url) - - def _load_manifest(self, file_loc: Union[str, Path]): - """Loads the plugin's manifest from a remote URL. - - Args: - download_url (str): The URL from which to download the manifest. - - This method fetches and parses the plugin's manifest file, setting flags based on the contents. - """ - if self._remote: - bytes_data = b''.join(self._download_buffer(file_loc, auth=self._auth)) - self._manifest = yaml.safe_load(bytes_data) - else: - with open(file_loc, 'r') as f: - self._manifest = yaml.safe_load(f) - if self._manifest['type'] != 'plugin': - warnings.warn(f"The type annotation for the '{self._manifest['name']}' plugin manifest is not set as 'plugin.' \ - This may cause the plugin to function incorrectly.") - self.is_valid = False - - @property - def _imported_module(self): - """Dynamically imports the module from loaded data. - - This method uses the information from the manifest to import the specified module and method dynamically. - - Returns: - The imported method from the module. 
- """ - source = self._manifest['source'] - f, c = source.split(':') - mloc = self._data[f] if self._remote else self._contents['files'][f] - loader = ModuleLoader(mloc) - module = loader.get_module(self.name) - return getattr(module, c) - - def __repr__(self): - if self.is_valid: - repr = f"PlugInSnippet<{self.type}>::{self.name}[{self.version}]" - if self._remote: - repr += '+InMemory' if self._module_loaded else '+Remote' - return repr - else: - return "PlugInSnippet::InValidPlugin" diff --git a/brkraw/api/config/snippet/preset.py b/brkraw/api/config/snippet/preset.py deleted file mode 100644 index 48474ad..0000000 --- a/brkraw/api/config/snippet/preset.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Snippet for Preset""" - -from .base import Snippet - - -class Preset(Snippet): - def __init__(self): - raise NotImplementedError \ No newline at end of file diff --git a/brkraw/api/data/study.py b/brkraw/api/data/study.py index 15cf738..1bed535 100644 --- a/brkraw/api/data/study.py +++ b/brkraw/api/data/study.py @@ -31,9 +31,10 @@ from pathlib import Path from dataclasses import dataclass from .scan import Scan +from brkraw import config from brkraw.api.pvobj import PvStudy from brkraw.api.analyzer.base import BaseAnalyzer -from brkraw.api.helper.recipe import Recipe +from xnippy.formatter import RecipeFormatter from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Optional @@ -154,21 +155,28 @@ def _process_header(self): Returns: dict: A dictionary containing structured information about the study, its scans, and reconstructions. """ - spec_path = os.path.join(os.path.dirname(__file__), 'study.yaml') + spec_path = os.path.join(os.path.dirname(__file__), 'study.yaml') # TODO:asdasd with open(spec_path, 'r') as f: spec = yaml.safe_load(f) - self._info = StudyHeader(header=Recipe(self, copy(spec)['study']).get(), scans=[]) + self._info = StudyHeader(header=RecipeFormatter(self, copy(spec)['study']).get(), + scans=[]) with warnings.catch_warnings(): warnings.simplefilter("ignore") for scan_id in self.avail: scanobj = self.get_scan(scan_id) scan_spec = copy(spec)['scan'] - scan_header = ScanHeader(scan_id=scan_id, header=Recipe(scanobj.info, scan_spec).get(), recos=[]) + scaninfo_targets = [scanobj.info, + scanobj.get_scaninfo(get_analyzer=True)] + scan_header = ScanHeader(scan_id=scan_id, + header=RecipeFormatter(scaninfo_targets, scan_spec).get(), + recos=[]) for reco_id in scanobj.avail: - recoinfo = scanobj.get_scaninfo(reco_id) + recoinfo_targets = [scanobj.get_scaninfo(reco_id=reco_id), + scanobj.get_scaninfo(reco_id=reco_id, get_analyzer=True)] reco_spec = copy(spec)['reco'] - reco_header = Recipe(recoinfo, reco_spec).get() - reco_header = RecoHeader(reco_id=reco_id, header=reco_header) if reco_header else None + reco_header = RecipeFormatter(recoinfo_targets, reco_spec).get() + reco_header = RecoHeader(reco_id=reco_id, + header=reco_header) if reco_header else None if reco_header: scan_header.recos.append(reco_header) self._info.scans.append(scan_header) diff --git a/brkraw/api/data/study.yaml b/brkraw/api/data/study.yaml index 9fc9512..0dfa198 100644 --- a/brkraw/api/data/study.yaml +++ b/brkraw/api/data/study.yaml @@ -1,3 +1,8 @@ +name: tonifti-studyinfo +type: recipe +subtype: studyinfo +version: 24.5.3 + study: date: - header.study_date diff --git a/brkraw/api/helper/recipe.py b/brkraw/api/helper/recipe.py deleted file mode 100644 index fdba6c1..0000000 --- a/brkraw/api/helper/recipe.py +++ /dev/null @@ -1,112 +0,0 @@ -from __future__ import annotations -import re -import 
warnings -from collections import OrderedDict -from typing import TYPE_CHECKING -from .base import BaseHelper -if TYPE_CHECKING: - from typing import Optional, Dict, List, Any - from brkraw.api.analyzer import BaseAnalyzer - - -class Recipe(BaseHelper): - def __init__(self, target: 'BaseAnalyzer', recipe: dict, legacy: bool = False, - startup_scripts: Optional[List[str]] = None): - self.target = target - self.recipe = recipe - self.results = OrderedDict() - self.backward_comp = legacy - self.startup_scripts = startup_scripts or [] - self._parse_recipe() - - def _parse_recipe(self): - for key, value in self.recipe.items(): - if key == 'startup': - scripts = [s for s in value if s is not None] - self.startup_scripts.extend(scripts) - else: - if value := self._eval_value(value): - self.results[key] = value - - def _eval_value(self, value: Any): - if isinstance(value, str): - value = self._process_str(value) - elif isinstance(value, list): - value = self._process_list(value) - elif isinstance(value, dict): - value = self._process_dict(value) - return value - - def _legacy_parser(self, param_key: str): - for pars in ['acqp', 'method', 'visu_pars']: - value = getattr(self.target, pars).get(param_key) - if value is not None: - return value - return param_key - - def _process_str(self, str_obj: str): - if self.backward_comp: - return self._legacy_parser(str_obj) - ptrn = r'(?P^[a-zA-Z][a-zA-Z0-9_]*)\.(?P[a-zA-Z][a-zA-Z0-9_]*)' - if matched := re.match(ptrn, str_obj): - if hasattr(self.target, matched['attr']): - attr = getattr(self.target, matched['attr']) - return attr.get(matched['key'], None) - else: - return None - else: - return str_obj - - def _process_list(self, list_obj: List): - for c in list_obj: - processed = self._eval_value(c) - if processed is not None: - return processed - return None - - def _process_dict(self, dict_obj: Dict): - script_cmd = 'Equation' if self.backward_comp else 'script' - if script_cmd in dict_obj.keys(): - return self._process_dict_case_script(dict_obj, script_cmd) - elif 'key' in dict_obj.keys(): - return self._process_dict_case_pick_from_list(dict_obj) - else: - processed = {} - for key, value in dict_obj.items(): - if value := self._eval_value(value): - processed[key] = value - return processed if len(processed) else None - - def _process_dict_case_script(self, dict_obj: Dict, script_cmd: List[str]): - script = dict_obj[script_cmd] - if self.startup_scripts: - for s in self.startup_scripts: - exec(s) - for key, value in dict_obj.items(): - if key != script_cmd: - value = self._eval_value(value) - if value == None: - return None - exec(f'global {key}') - try: - exec(f'{key} = {value}') - except NameError: - exec(f"{key} = '{value}'") - exec(f"output = {script}", globals(), locals()) - return locals()['output'] - - def _process_dict_case_pick_from_list(self, dict_obj: Dict): - key = dict_obj.pop('key') - value = self._process_str(key) - if not isinstance(value, list): - warnings.warn(f"The value returned from '{key}' is not of type 'list'.", UserWarning) - return None - if 'where' in dict_obj.keys(): - hint = self._eval_value(dict_obj.pop('where')) - return value.index(hint) if hint in value else None - elif 'idx' in dict_obj.keys(): - idx = self._eval_value(dict_obj.pop('idx')) - return value[idx] if idx < len(value) else None - - def get(self): - return self.results \ No newline at end of file diff --git a/brkraw/api/pvobj/base.py b/brkraw/api/pvobj/base.py index c24356b..bccfdd6 100644 --- a/brkraw/api/pvobj/base.py +++ b/brkraw/api/pvobj/base.py @@ -12,19 
+12,18 @@ from __future__ import annotations import os -import zipfile +from zipfile import ZipFile from collections import OrderedDict, defaultdict from pathlib import Path from .parameters import Parameter -from brkraw.api.util.package import PathResolver +from xnippy.formatter import PathFormatter from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Optional, Union, List - from zipfile import ZipExtFile - from io import BufferedReader + from typing import Optional, List + from .types import PvFileBuffer -class BaseBufferHandler(PathResolver): +class BaseBufferHandler(PathFormatter): """Handles buffer management for file operations, ensuring all file streams are properly managed. This class provides context management for file buffers, allowing for easy and safe opening and closing @@ -33,7 +32,7 @@ class BaseBufferHandler(PathResolver): Attributes: _buffers (Union[List[BufferedReader], List[ZipExtFile]]): A list of file buffer objects. """ - _buffers: Union[List[BufferedReader], List[ZipExtFile]] = [] + _buffers: List[PvFileBuffer] = [] def close(self): """Closes all open file buffers managed by this handler.""" if self._buffers: @@ -122,7 +121,7 @@ def _fetch_zip(path: 'Path'): - 'files': A list of file names. - 'file_indexes': A list of file indexes. """ - with zipfile.ZipFile(path) as zip_file: + with ZipFile(path) as zip_file: contents = defaultdict(lambda: {'dirs': set(), 'files': [], 'file_indexes': [], 'file_sizes': []}) for i, item in enumerate(zip_file.infolist()): if not item.is_dir(): @@ -162,7 +161,7 @@ def _open_as_fileobject(self, key: str): raise KeyError(f'Failed to load filename "{key}" from folder "{rel_path}".\n [{", ".join(files)}]') if file_indexes := self.contents.get('file_indexes'): - with zipfile.ZipFile(rootpath) as zf: + with ZipFile(rootpath) as zf: idx = file_indexes[files.index(key)] return zf.open(zf.namelist()[idx]) else: @@ -302,7 +301,7 @@ def get_2dseq(self, scan_id:Optional[int] = None, reco_id:Optional[int] = None): "Please check the dataset and ensure the file is in the expected location.") @staticmethod - def _is_binary(fileobj: BufferedReader, bytes: int = 512): + def _is_binary(fileobj: PvFileBuffer, bytes: int = 512): """Determine if a file is binary by reading a block of data. 
Args: diff --git a/brkraw/api/pvobj/types.py b/brkraw/api/pvobj/types.py new file mode 100644 index 0000000..3d2812d --- /dev/null +++ b/brkraw/api/pvobj/types.py @@ -0,0 +1,24 @@ +from io import BufferedReader +from zipfile import ZipExtFile +from typing import Type, Union +from .pvscan import PvScan +from .pvstudy import PvStudy +from .pvreco import PvReco +from .pvfiles import PvFiles +from .parameters import Parameter + + +PvFileBuffer = Union[BufferedReader, ZipExtFile] + +PvStudyType = Type[PvStudy] + +PvScanType = Type[PvScan] + +PvRecoType = Type[PvReco] + +PvFilesType = Type[PvFiles] + +ParameterType = Type[Parameter] + +PvObjType = Type[Union[PvScan, PvReco, PvFiles]] \ No newline at end of file diff --git a/brkraw/app/backup/__init__.py b/brkraw/app/backup/__init__.py new file mode 100644 index 0000000..57ad8df --- /dev/null +++ b/brkraw/app/backup/__init__.py @@ -0,0 +1,41 @@ +"""Provide all conventional functions with backward compatibility, and also provide a function to send files via an FTP server +as well as compress only the files needed. +""" + +import argparse +from brkraw import __version__ + +def main(): + """Main entry point. + Provides a list of all available converting modes (including plugins). + """ + parser = argparse.ArgumentParser(prog='brk_tonifti', + description="BrkRaw command-line interface for NifTi conversion") + parser.add_argument("-v", "--version", action='version', version='%(prog)s v{}'.format(__version__)) + + subparsers = parser.add_subparsers(title='Sub-commands', + description='To run this command, you must specify one of the functions listed ' + 'below next to the command. For more information on each function, ' + 'use -h next to the function name to call the help document.', + help='description', + dest='function', + metavar='command') + + input_str = "input raw Bruker data" + input_dir_str = "input directory that contains multiple raw Bruker data" + output_dir_str = "output directory name" + output_fnm_str = "output filename" + bids_opt = "create a JSON file containing metadata based on the BIDS recommendation" + + info = subparsers.add_parser("info", help='Prints out the information of the internal contents in Bruker raw data') + + scan = subparsers.add_parser("scan", help='Convert a single raw Bruker data into NifTi file(s)') + study = subparsers.add_parser("study", help="Convert all raw Bruker data located in the input directory") + dataset = subparsers.add_parser("dataset", help="Convert all raw Bruker data located in the input directory") + + # info + info.add_argument("input", help=input_str, type=str) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/brkraw/app/backup/cache.py b/brkraw/app/backup/cache.py new file mode 100644 index 0000000..04cad7d --- /dev/null +++ b/brkraw/app/backup/cache.py @@ -0,0 +1,175 @@ +from brkraw.app.tonifti import StudyToNifti + +import os +import datetime + + + +class NamedTuple(object): + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + +class BackupCache: + def __init__(self): + self._init_dataset_class() + + def logging(self, message, method): + now = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + self.log_data.append(NamedTuple(datetime=now, method=method, message=message)) + + @property + def num_raw(self): + return len(self.raw_data) + #TODO: need to check if there is enough space to perform the backup, as well as handle a crash event + #during the backup (the cache is updated even if the backup fails) + + @property + def num_arc(self): + return len(self.arc_data)
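+ # One possible shape for the space check mentioned in the TODO above, as a
+ # sketch only (shutil.disk_usage is standard library; names are illustrative):
+ # import shutil
+ # def has_enough_space(raw_size_bytes, arc_dir):
+ #     free = shutil.disk_usage(arc_dir).free
+ #     return free > raw_size_bytes * 1.1  # keep a ~10% safety margin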
+ + def _init_dataset_class(self): + # dataset + self.raw_data = [] + self.arc_data = [] + self.log_data = [] + + def get_rpath_obj(self, path, by_arc=False): + if len(self.raw_data): + if by_arc: + data_pid = [b.data_pid for b in self.arc_data if b.path == path] + if len(data_pid): + rpath_obj = [r for r in self.raw_data if r.data_pid == data_pid[0]] + if len(rpath_obj): + return rpath_obj[0] + else: + return None + else: + return None + else: + rpath_obj = [r for r in self.raw_data if r.path == path] + if len(rpath_obj): + return rpath_obj[0] + else: + return None + else: + return None + + def get_bpath_obj(self, path, by_raw=False): + if len(self.arc_data): + if by_raw: + r = self.get_rpath_obj(path) + if r is None: + return [] + else: + return [b for b in self.arc_data if b.data_pid == r.data_pid] + else: + data_pid = [b for b in self.arc_data if b.path == path][0].data_pid + return [b for b in self.arc_data if b.data_pid == data_pid] + else: + return [] + + def isin(self, path, raw=True): + if raw: + list_data = self.raw_data + else: + list_data = self.arc_data + _history = [d for d in list_data if d.path == path] + return len(_history) > 0 + + def set_raw(self, dirname, raw_dir, removed=False): + # rawobj: data_pid, path, garbage, removed, backup + if not removed: + dir_path = os.path.join(raw_dir, dirname) + if not self.isin(dirname, raw=True): # continue if the path is not saved in this cache obj + if os.path.isdir(dir_path): + raw = StudyToNifti(dir_path) + garbage = not raw.is_pvdataset + rawobj = NamedTuple(data_pid=self.num_raw, + path=dirname, + garbage=garbage, + removed=removed, + backup=False) + self.raw_data.append(rawobj) + else: + self.logging('{} is not a valid directory. 
[raw dataset must be a directory]'.format(dir_path), + 'set_raw') + else: + rawobj = NamedTuple(data_pid=self.num_raw, + path=dirname, + garbage=None, + removed=removed, + backup=True) + self.raw_data.append(rawobj) + + def set_arc(self, arc_fname, arc_dir, raw_dir): + # arcobj: data_pid, path, garbage, crashed, issued + arc_path = os.path.join(arc_dir, arc_fname) + + if not self.isin(arc_fname, raw=False): # continue if the path is not saved in this cache obj + issued = False + try: + arc = StudyToNifti(arc_path) + raw_dname = arc.pvobj.path + raw_path = os.path.join(raw_dir, raw_dname) + garbage = not arc.is_pvdataset + crashed = False + except Exception: + self.logging('{} is crashed.'.format(arc_path), + 'set_arc') + arc = None + raw_dname = None + raw_path = None + garbage = True + crashed = True + + if raw_dname is not None: + r = self.get_rpath_obj(raw_dname) + else: + r = None + + if r is None: + raw_dname = os.path.splitext(arc_fname)[0] + self.set_raw(raw_dname, raw_dir, removed=True) + r = self.get_rpath_obj(raw_dname) + r.garbage = garbage + if crashed: + issued = True + else: + if arc is None: + issued = True + else: + if not r.removed: + if not r.backup: + pass + else: + raw = StudyToNifti(raw_path) + if raw.num_recos != arc.num_recos: + issued = True + arcobj = NamedTuple(data_pid=r.data_pid, + path=arc_fname, + garbage=garbage, + crashed=crashed, + issued=issued) + if not crashed: + if not issued: + # a completed backup must have no issues + r.backup = True + + self.arc_data.append(arcobj) + + def is_duplicated(self, file_path, by_arc=False): + if by_arc: + b = self.get_bpath_obj(file_path, by_raw=False) + else: + b = self.get_bpath_obj(file_path, by_raw=True) + return len(b) > 1 + + + diff --git a/brkraw/app/backup/handler.py b/brkraw/app/backup/handler.py new file mode 100644 index 0000000..c48722d --- /dev/null +++ b/brkraw/app/backup/handler.py @@ -0,0 +1,477 @@ +import os +from brkraw.app.tonifti import StudyToNifti +from brkraw.api.config.utils.functools import get_dirsize, \ + get_filesize, yes_or_no, print_internal_error, TimeCounter +import sys +import datetime +import tqdm +import pickle +import zipfile +from .cache import BackupCache +import getpass + + +_bar_fmt = '{l_bar}{bar:20}{r_bar}{bar:-20b}' +_user = getpass.getuser() +_width = 80 +_line_sep_1 = '-' * _width +_line_sep_2 = '=' * _width +_empty_sep = '' + +class BackupCacheHandler: + def __init__(self, raw_path, backup_path, fname='.brk-backup_cache'): + """ Handler class for backup data + + Args: + raw_path: path for the raw dataset + backup_path: path for the backup dataset + fname: file name used to pickle the cache data + """ + self._cache = None + self._rpath = os.path.expanduser(raw_path) + self._apath = os.path.expanduser(backup_path) + self._cache_path = os.path.join(self._apath, fname) + self._load_pickle() + # self._parse_info() + + def _load_pickle(self): + if os.path.exists(self._cache_path): + try: + with open(self._cache_path, 'rb') as cache: + self._cache = pickle.load(cache) + except EOFError: + os.remove(self._cache_path) + self._cache = BackupCache() + else: + self._cache = BackupCache() + self._save_pickle() + + def _save_pickle(self): + with open(self._cache_path, 'wb') as f: + pickle.dump(self._cache, f) + + def logging(self, message, method): + method = 'Handler.{}'.format(method) + self._cache.logging(message, method) + + @property + def is_duplicated(self): + return self._cache.is_duplicated + + @property + def get_rpath_obj(self): + return self._cache.get_rpath_obj + + @property + def get_bpath_obj(self): + return self._cache.get_bpath_obj + + @property + def arc_data(self): + return self._cache.arc_data + + @property + def raw_data(self): + return self._cache.raw_data + + @property + def scan(self): + return self._parse_info
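+ # A minimal usage sketch (paths are illustrative): build the handler, refresh
+ # the cache from both directories, then report what still needs archiving.
+ # handler = BackupCacheHandler(raw_path='~/rawdata', backup_path='~/archive')
+ # handler.scan()          # the property returns _parse_info, so calling it runs the scan
+ # handler.print_status()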
+ + def _parse_info(self): + print('\n-- Parsing metadata from the raw and archived directories --') + list_of_raw = sorted([d for d in os.listdir(self._rpath) if + os.path.isdir(os.path.join(self._rpath, d)) and 'import' not in d]) + list_of_brk = sorted([d for d in os.listdir(self._apath) if + (os.path.isfile(os.path.join(self._apath, d)) and + (d.endswith('zip') or d.endswith('PvDatasets')))]) + + # parse dataset + print('\nScanning raw datasets and updating cache...') + for r in tqdm.tqdm(list_of_raw, bar_format=_bar_fmt): + self._cache.set_raw(r, raw_dir=self._rpath) + self._save_pickle() + + print('\nScanning archived datasets and updating cache...') + for b in tqdm.tqdm(list_of_brk, bar_format=_bar_fmt): + self._cache.set_arc(b, arc_dir=self._apath, raw_dir=self._rpath) + self._save_pickle() + + # update raw dataset information (raw dataset cache entries remain even after the data is removed) + print('\nScanning raw dataset cache...') + for r in tqdm.tqdm(self.raw_data[:], bar_format=_bar_fmt): + if r.path is not None: + if not os.path.exists(os.path.join(self._rpath, r.path)): + if not r.removed: + r.removed = True + self._save_pickle() + + print('\nReviewing the cached information...') + for b in tqdm.tqdm(self.arc_data[:], bar_format=_bar_fmt): + arc_path = os.path.join(self._apath, b.path) + if not os.path.exists(arc_path): # the archive no longer exists, so drop it from the cache + self.arc_data.remove(b) + else: # the archive still exists, so re-check its status + if b.issued: # check if the issue has been resolved. + if b.crashed: # check if the dataset has been re-archived. + if zipfile.is_zipfile(arc_path): + b.crashed = False # backup success! + b.issued = not self.is_same_as_raw(b.path) + if b.issued: + if b.garbage: + if StudyToNifti(arc_path).is_pvdataset: + b.garbage = False + # otherwise the archive is still crashed. + else: # the dataset has an issue but is not crashed, so check if the issue has been resolved. + b.issued = not self.is_same_as_raw(b.path) + if not b.issued: # if the issue is resolved + r = self.get_rpath_obj(b.path, by_arc=True) + r.backup = True + else: # no issue with this archive; make sure the raw entry is marked as backed up
+ r = self.get_rpath_obj(b.path, by_arc=True) + if not r.backup: + r.backup = True + self._save_pickle() + + def is_same_as_raw(self, filename): + arc = StudyToNifti(os.path.join(self._apath, filename)) + if arc.pvobj.path is not None: + raw_path = os.path.join(self._rpath, arc.pvobj.path) + if os.path.exists(raw_path): + raw = StudyToNifti(raw_path) + return arc.num_recos == raw.num_recos + else: + return None + else: + return None + + def get_duplicated(self): + duplicated = dict() + for b in self.arc_data: + if self.is_duplicated(b.path, by_arc=True): + rpath = self.get_rpath_obj(b.path, by_arc=True).path + if rpath in duplicated.keys(): + duplicated[rpath].append(b.path) + else: + duplicated[rpath] = [b.path] + return duplicated + + def get_list_for_backup(self): + return [r for r in self.get_incompleted() if not r.garbage] + + def get_issued(self): + return [b for b in self.arc_data if b.issued] + + def get_crashed(self): + return [b for b in self.arc_data if b.crashed] + + def get_incompleted(self): + return [r for r in self.raw_data if not r.backup] + + def get_completed(self): + return [r for r in self.raw_data if r.backup] + + def get_garbage(self): + return [b for b in self.arc_data if b.garbage] + + @staticmethod + def _gen_header(title, width=_width): + lines = [] + gen_by = 'Generated by {}'.format(_user).rjust(width) + + lines.append(_empty_sep) + lines.append(_line_sep_2) + lines.append(_empty_sep) + lines.append(title.center(width)) + lines.append(gen_by) + lines.append(_line_sep_2) + lines.append(_empty_sep) + return lines + + def _get_backup_status(self): + now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + lines = self._gen_header('Report of the status of archived data [{}]'.format(now)) + list_need_to_be_backup = self.get_list_for_backup()[:] + total_list = len(list_need_to_be_backup) + if len(list_need_to_be_backup): + lines.append('>> The list of raw data that needs to be archived.') + lines.append('[Note: The list excludes raw data that does not contain any binary file]') + lines.append(_line_sep_1) + lines.append('{}{}'.format('Rawdata Path'.center(_width-10), 'Size'.rjust(10))) + for r in list_need_to_be_backup: + if len(r.path) > _width-10: + path_name = '{}... '.format(r.path[:_width-14]) + else: + path_name = r.path + raw_path = os.path.join(self._rpath, r.path) + dir_size, unit = get_dirsize(raw_path) + if unit == 'B': + dir_size = '{} {}'.format(dir_size, unit).rjust(10) + else: + dir_size = '{0:.2f}{1}'.format(dir_size, unit).rjust(10) + lines.append('{}{}'.format(path_name.ljust(_width-10), dir_size)) + lines.append(_line_sep_1) + lines.append(_empty_sep) + + list_issued = self.get_issued() + total_list += len(list_issued) + if len(list_issued): + lines.append('>> Failed or incomplete archived data.') + lines.append('[Note: The listed data are either crashed or incomplete]') + lines.append(_line_sep_1) + lines.append('{}{}{}'.format('Archived Path'.center(60), + 'Condition'.rjust(10), + 'Size'.rjust(10))) + for b in self.get_issued(): + if len(b.path) > _width-20: + path_name = '{}... 
'.format(b.path[:_width-24]) + else: + path_name = b.path + arc_path = os.path.join(self._apath, b.path) + file_size, unit = get_filesize(arc_path) + if b.crashed: + raw_path = self.get_rpath_obj(b.path, by_arc=True).path + if raw_path is None: + condition = 'Failed' + else: + condition = 'Crashed' + else: + condition = 'Issued' + if unit == 'B': + file_size = '{} {}'.format(file_size, unit).rjust(10) + else: + file_size = '{0:.2f}{1}'.format(file_size, unit).rjust(10) + lines.append('{}{}{}'.format(path_name.ljust(_width-20), + condition.center(10), + file_size)) + lines.append(_line_sep_1) + lines.append(_empty_sep) + + list_duplicated = self.get_duplicated() + total_list += len(list_duplicated) + if len(list_duplicated.keys()): + lines.append('>> List of duplicated archived data.') + lines.append('[Note: The listed raw data has been archived into multiple files]') + lines.append(_line_sep_1) + lines.append('{} {}'.format('Raw Path'.center(int(_width/2)-1), + 'Archived'.center(int(_width/2)-1))) + for rpath, bpaths in list_duplicated.items(): + if rpath is None: + rpath = '-- Removed --' + if len(rpath) > int(_width/2)-1: + rpath = '{}... '.format(rpath[:int(_width/2)-5]) + for i, bpath in enumerate(bpaths): + if len(bpath) > int(_width/2)-1: + bpath = '{}... '.format(bpath[:int(_width/2)-5]) + if i == 0: + lines.append('{}:-{}'.format(rpath.ljust(int(_width/2)-1), + bpath.ljust(int(_width/2)-1))) + else: + lines.append('{} -{}'.format(''.center(int(_width/2)-1), + bpath.ljust(int(_width/2)-1))) + lines.append(_line_sep_1) + lines.append(_empty_sep) + + if total_list == 0: + lines.append(_empty_sep) + lines.append('The status of archived data is up-to-date...'.center(80)) + lines.append(_empty_sep) + lines.append(_line_sep_1) + return '\n'.join(lines) + + def print_status(self, fobj=sys.stdout): + summary = self._get_backup_status() + print(summary, file=fobj) + + def print_completed(self, fobj=sys.stdout): + now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + lines = self._gen_header('List of archived dataset [{}]'.format(now)) + list_of_completed = self.get_completed() + if len(list_of_completed): + lines.append(_line_sep_1) + lines.append('{}{}{}'.format('Rawdata Path'.center(_width - 20), + 'Removed'.rjust(10), + 'Archived'.rjust(10))) + for r in list_of_completed: + if len(r.path) > _width - 20: + path_name = '{}... 
'.format(r.path[:_width - 24]) + else: + path_name = r.path + removed = 'True' if r.removed else 'False' + archived = 'True' if r.backup else 'False' + lines.append('{}{}{}'.format(path_name.ljust(_width - 20), + removed.center(10), + archived.center(10))) + lines.append(_line_sep_1) + lines.append(_empty_sep) + else: + lines.append(_empty_sep) + lines.append('No archived data...'.center(80)) + lines.append(_empty_sep) + lines.append(_line_sep_1) + summary = '\n'.join(lines) + print(summary, file=fobj) + + def clean(self): + print('\n[Warning] The archived data that contains any issue will be deleted by this command ' + 'and this cannot be reverted.') + print(' Before running this, please update the data status cache using the "review" function.\n') + ans = yes_or_no('Are you sure you want to continue?') + + if ans: + list_data = dict(issued=self.get_issued()[:], + garbage=self.get_garbage()[:], + crashed=self.get_crashed()[:], + duplicated=self.get_duplicated().copy()) + for label, dset in list_data.items(): + if label == 'duplicated': + print('\nStart removing {} archived data...'.format(label.upper())) + if len(dset.items()): + for raw_dname, arcs in dset.items(): + if raw_dname is not None: + raw_path = os.path.join(self._rpath, raw_dname) + if os.path.exists(raw_path): + r_size, r_unit = get_dirsize(raw_path) + r_size = '{0:.2f} {1}'.format(r_size, r_unit) + else: + r_size = 'Removed' + if len(raw_dname) > 60: + raw_dname = '{}...'.format(raw_dname[:56]) + else: + r_size = 'Removed' + raw_dname = 'No name' + print('Raw dataset: [{}] {}'.format(raw_dname.ljust(60), r_size.rjust(10))) + num_dup = len(arcs) + dup_list = [' +-{}'] * num_dup + print('\n'.join(dup_list).format(*arcs)) + for arc_fname in arcs: + path_to_clean = os.path.join(self._apath, arc_fname) + ans_4rm = yes_or_no(' - Are you sure you want to remove [{}]?\n '.format(arc_fname)) + if ans_4rm: + try: + os.remove(path_to_clean) + a = self.get_bpath_obj(arc_fname) + if len(a): + self.arc_data.remove(a[0]) + except OSError: + error = NotImplementedError(path_to_clean) + self.logging(str(error), 'clean') + print(' Failed! The file is locked.') + else: + raise NotImplementedError + else: + if len(dset): + print('\nStart removing {} archived data...'.format(label.upper())) + + def ask_to_remove(): + ans_4rm = yes_or_no(' - Are you sure you want to remove [{}]?\n '.format(path_to_clean)) + if ans_4rm: + try: + os.remove(path_to_clean) + self.arc_data.remove(a) + except OSError: + error = NotImplementedError(path_to_clean) + self.logging(str(error), 'clean') + print(' Failed! The file is locked.') + else: + raise NotImplementedError
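+ # Note: ask_to_remove is a closure over path_to_clean and a, which are assigned
+ # in the loop below before each call; Python resolves free variables at call
+ # time (late binding), so each invocation sees the current iteration's values.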
+ for a in dset: + path_to_clean = os.path.join(self._apath, a.path) + if label == 'issued': + if a.garbage or a.crashed: + pass + else: + ask_to_remove() + elif label == 'garbage': + if a.crashed: + pass + else: + ask_to_remove() + self._save_pickle() + + def backup(self, fobj=sys.stdout): + list_raws = self.get_list_for_backup()[:] + list_issued = self.get_issued()[:] + print('\nStarting backup of raw data that has not yet been archived...') + self.logging('Archiving process starts...', 'backup') + + for i, dlist in enumerate([list_raws, list_issued]): + if i == 0: + print('\n[step1] Archiving the raw data that has not been archived.') + self.logging('Archiving the raw data that has not been archived...', 'backup') + elif i == 1: + print('\n[step2] Re-archiving the data whose existing archive has issues.') + self.logging('Archiving the raw data whose archive contains issues...', 'backup') + + for r in tqdm.tqdm(dlist, unit=' dataset(s)', bar_format=_bar_fmt): + run_backup = True + raw_path = os.path.join(self._rpath, r.path) + arc_path = os.path.join(self._apath, '{}.zip'.format(r.path)) + tmp_path = os.path.join(self._apath, '{}.part'.format(r.path)) + if os.path.exists(raw_path): + if os.path.exists(tmp_path): + print(' - [{}] is detected and removed...'.format(tmp_path), file=fobj) + os.unlink(tmp_path) + if os.path.exists(arc_path): + if not zipfile.is_zipfile(arc_path): + print(' - [{}] is a crashed file, removing...'.format(arc_path), file=fobj) + os.unlink(arc_path) + else: + arc = StudyToNifti(arc_path) + raw = StudyToNifti(raw_path) + if arc.is_pvdataset: + if arc.num_recos != raw.num_recos: + print(' - [{}] is mismatching with the corresponding raw data, ' + 'removing...'.format(arc_path), file=fobj) + os.unlink(arc_path) + else: + run_backup = False + else: + print(' - [{}] is mismatching with the corresponding raw data, ' + 'removing...'.format(arc_path), file=fobj) + os.unlink(arc_path) + if run_backup: + print('\n :: Compressing [{}]...'.format(raw_path), file=fobj) + # Compressing + timer = TimeCounter() + try: # exception handling in case the compression fails + with zipfile.ZipFile(tmp_path, 'w') as zf: + # count directories to walk so tqdm can show a progress total + file_counter = 0 + for _ in os.walk(raw_path): + file_counter += 1 + + for i, (root, dirs, files) in tqdm.tqdm(enumerate(os.walk(raw_path)), + bar_format=_bar_fmt, + total=file_counter, + unit=' file(s)'): + splitted_root = root.split(os.sep) + if i == 0: + root_idx = splitted_root.index(r.path) + for f in files: + arc_name = os.sep.join(splitted_root[root_idx:] + [f]) + zf.write(os.path.join(root, f), arcname=arc_name) + print(' - [{}] is created.'.format(os.path.basename(arc_path)), file=fobj) + + except Exception: + print_internal_error(fobj) + error = NotImplementedError(raw_path) + self.logging(str(error), 'backup') + raise error + + print(' - processing time: {} sec'.format(timer.time()), file=fobj) + + # Backup validation + if not os.path.exists(tmp_path): # Check if the file was generated + error = NotImplementedError(raw_path) + self.logging(str(error), 'backup') + raise error + else: + try: + os.rename(tmp_path, arc_path) + except Exception: + print_internal_error(fobj) + raise NotImplementedError \ No newline at end of file diff --git a/brkraw/app/tonifti/__init__.py b/brkraw/app/tonifti/__init__.py index 31b1e38..942992f 100644 --- a/brkraw/app/tonifti/__init__.py +++ b/brkraw/app/tonifti/__init__.py @@ -2,75 +2,81 @@ dependency: bids, plugin """ -from brkraw import __version__ -from .base import
BasePlugin, PvScan, PvReco, PvFiles -from .study import StudyToNifti, ScanToNifti -import argparse +from brkraw import __version__, config +from xnippy.module import ModuleCommander +from brkraw.app.tonifti.plugin import ToNiftiPlugin, PvScan, PvReco, PvFiles +from brkraw.app.tonifti.study import StudyToNifti, ScanToNifti -__all__ = ['BasePlugin', 'StudyToNifti', 'ScanToNifti', 'PvScan', 'PvReco', 'PvFiles'] +tonifti_config = config.config['app']['tonifti'] +# tonifti_presets = config.get_fetcher('preset') -def main(): - """main script allows convert brkraw - provide list function of all available converting mode (including plugin) - """ - parser = argparse.ArgumentParser(prog='brk_tonifti', - description="BrkRaw command-line interface for NifTi conversion") - parser.add_argument("-v", "--version", action='version', version='%(prog)s v{}'.format(__version__)) +__all__ = ['ToNiftiPlugin', 'StudyToNifti', 'ScanToNifti', 'PvScan', 'PvReco', 'PvFiles'] - subparsers = parser.add_subparsers(title='Sub-commands', - description='To run this command, you must specify one of the functions listed' - 'below next to the command. For more information on each function, ' - 'use -h next to the function name to call help document.', - help='description', - dest='function', - metavar='command') +# def main(): +# """main script allows convert brkraw +# provide list function of all available converting mode (including plugin) +# """ +# parser =ArgParser(prog='brkraw-tonifti', +# description="BrkRaw command-line interface for converting to NifTi1 format") +# parser.add_argument("-v", "--version", action='version', version='%(prog)s v{}'.format(__version__)) - input_str = "input raw Bruker data" - input_dir_str = "input directory that contains multiple raw Bruker data" - output_dir_str = "output directory name" - output_fnm_str = "output filename" - bids_opt = "create a JSON file contains metadata based on BIDS recommendation" +# subparsers = parser.add_subparsers(title='Sub-commands', +# description='To run this command, you must specify one of the functions listed' +# 'below next to the command. 
For more information on each function, ' +# 'use -h next to the function name to call help document.', +# help='description', +# dest='function', +# metavar='command') - info = subparsers.add_parser("info", help='Prints out the information of the internal contents in Bruker raw data') - - scan = subparsers.add_parser("scan", help='Convert a single raw Bruker data into NifTi file(s)') - study = subparsers.add_parser("study", help="Convert All raw Bruker data located in the input directory") - dataset = subparsers.add_parser("dataset", help="Convert All raw Bruker data located in the input directory") +# input_str = "input raw Bruker data" +# input_dir_str = "input directory that contains multiple raw Bruker data" +# output_dir_str = "output directory name" +# output_fnm_str = "output filename" +# bids_opt = "create a JSON file contains metadata based on BIDS recommendation" - # info - info.add_argument("input", help=input_str, type=str) +# info = subparsers.add_parser("info", help='Prints out the information of the internal contents in Bruker raw data') +# dataset = subparsers.add_parser("dataset", help="Convert a multiple PvDatasets into NifTi file(s)") +# study = subparsers.add_parser("study", help="Convert a whole Scans in PvDataset into NifTi file(s)") +# scan = subparsers.add_parser("scan", help='Convert a Scan folder in PvDataset into NifTi file(s)') +# reco = subparsers.add_parser("reco", help='Convert a Reco folder in PvDataset into NifTi file(s)') +# files = subparsers.add_parser("dataset", help="Convert All raw Bruker data located in the input directory") +# plugin = - # tonii - scan.add_argument("input", help=input_str, type=str) - scan.add_argument("-b", "--bids", help=bids_opt, action='store_true') - scan.add_argument("-o", "--output", help=output_fnm_str, type=str, default=False) - scan.add_argument("-s", "--scanid", help="Scan ID, option to specify a particular scan to convert.", type=str) - scan.add_argument("-r", "--recoid", help="RECO ID (default=1), " - "option to specify a particular reconstruction id to convert", - type=int, default=1) - scan.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." + \ - "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None) - scan.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \ - "the position variable can be defiend as _, " + \ - "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. Head_Supine)", type=str, default=None) - scan.add_argument("--ignore-slope", help='remove slope value from header', action='store_true') - scan.add_argument("--ignore-offset", help='remove offset value from header', action='store_true') - scan.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true') - scan.add_argument("--ignore-localizer", help='ignore the scan if it is localizer', action='store_true', default=True) +# # info +# info.add_argument("input", help=input_str, type=str) - # tonii_all - dataset.add_argument("input", help=input_dir_str, type=str) - dataset.add_argument("-o", "--output", help=output_dir_str, type=str) - dataset.add_argument("-b", "--bids", help=bids_opt, action='store_true') - dataset.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." 
+ \ - "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None) - dataset.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \ - "the position variable can be defiend as _, " + \ - "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. Head_Supine)", type=str, default=None) - dataset.add_argument("--ignore-slope", help='remove slope value from header', action='store_true') - dataset.add_argument("--ignore-offset", help='remove offset value from header', action='store_true') - dataset.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true') - dataset.add_argument("--ignore-localizer", help='ignore the scan if it is localizer', action='store_true') +# # tonii +# scan.add_argument("input", help=input_str, type=str) +# scan.add_argument("-b", "--bids", help=bids_opt, action='store_true') +# scan.add_argument("-o", "--output", help=output_fnm_str, type=str, default=False) +# scan.add_argument("-s", "--scanid", help="Scan ID, option to specify a particular scan to convert.", type=str) +# scan.add_argument("-r", "--recoid", help="RECO ID (default=1), " +# "option to specify a particular reconstruction id to convert", +# type=int, default=1) +# scan.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." + \ +# "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None) +# scan.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \ +# "the position variable can be defiend as _, " + \ +# "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. Head_Supine)", type=str, default=None) +# scan.add_argument("--ignore-slope", help='remove slope value from header', action='store_true') +# scan.add_argument("--ignore-offset", help='remove offset value from header', action='store_true') +# scan.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true') +# scan.add_argument("--ignore-localizer", help='ignore the scan if it is localizer', action='store_true', default=True) + +# # tonii_all +# dataset.add_argument("input", help=input_dir_str, type=str) +# dataset.add_argument("-o", "--output", help=output_dir_str, type=str) +# dataset.add_argument("-b", "--bids", help=bids_opt, action='store_true') +# dataset.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." + \ +# "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None) +# dataset.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \ +# "the position variable can be defiend as _, " + \ +# "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. 
Head_Supine)", type=str, default=None) +# dataset.add_argument("--ignore-slope", help='remove slope value from header', action='store_true') +# dataset.add_argument("--ignore-offset", help='remove offset value from header', action='store_true') +# dataset.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true') +# dataset.add_argument("--ignore-localizer", help='ignore the scan if it is localizer', action='store_true') if __name__ == '__main__': - main() \ No newline at end of file + # main() + print(config) \ No newline at end of file diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index 162ac97..4e28efe 100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -1,26 +1,22 @@ from __future__ import annotations import warnings import numpy as np -from pathlib import Path +from brkraw import config from nibabel.nifti1 import Nifti1Image from .header import Header -from brkraw import config from brkraw.api.pvobj.base import BaseBufferHandler -from brkraw.api.pvobj import PvScan, PvReco, PvFiles from brkraw.api.data import Scan -from brkraw.api.config.snippet import PlugInSnippet +from xnippy.snippet import PlugInSnippet from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import List, Optional, Union, Literal - from brkraw.api.config.manager import Manager as ConfigManager - - -XYZT_UNITS = \ - dict(EPI=('mm', 'sec')) + from typing import Optional, Union, Literal + from typing import List + from numpy.typing import NDArray + from xnippy.types import ConfigManagerType class BaseMethods(BaseBufferHandler): - config: ConfigManager = config + config: ConfigManagerType = config def set_scale_mode(self, scale_mode: Optional[Literal['header', 'apply']] = None): @@ -82,12 +78,14 @@ def get_affine_dict(scanobj: 'Scan', reco_id: Optional[int] = None, } @staticmethod - def get_nifti1header(scanobj: 'Scan', reco_id: Optional[int] = None, - scale_mode: Optional[Literal['header', 'apply']] = None): + def update_nifti1header(scanobj: 'Scan', + nifti1image: 'Nifti1Image', + reco_id: Optional[int] = None, + scale_mode: Optional[Literal['header', 'apply']] = None): if reco_id: scanobj.set_scaninfo(reco_id) scale_mode = scale_mode or 'header' - return Header(scanobj.info, scale_mode).get() + return Header(scaninfo=scanobj.info, nifti1image=nifti1image, scale_mode=scale_mode).get() @staticmethod def get_nifti1image(scanobj: 'Scan', @@ -96,34 +94,13 @@ def get_nifti1image(scanobj: 'Scan', subj_type: Optional[str] = None, subj_position: Optional[str] = None, plugin: Optional[Union['PlugInSnippet', str]] = None, - plugin_kws: Optional[dict] = None) -> Union['Nifti1Image', List['Nifti1Image']]: + plugin_kws: Optional[dict] = None) -> Optional[Union['Nifti1Image', List['Nifti1Image']]]: if plugin: - if isinstance(plugin, str): - not_available = False - fetcher = config.get_fetcher('plugin') - # check plugin available on local - if fetcher.is_cache: - # No plugin downloaded, check on remote - if available := [p for p in fetcher.remote if p.name == plugin]: - plugin = available.pop() - else: - not_available = True - else: - if available := [p for p in fetcher.local if p.name == plugin]: - plugin = available.pop() - else: - not_available = True - if isinstance(plugin, PlugInSnippet) and plugin.type == 'tonifti': - with plugin.set(pvobj=scanobj.pvobj, **plugin_kws) as p: - dataobj = p.get_dataobj() - affine = p.get_affine(subj_type=subj_type, subj_position=subj_position) - header = p.get_nifti1header() + if nifti1image := 
BaseMethods._bypass_method_via_plugin(scanobj=scanobj, + subj_type=subj_type, subj_position=subj_position, + plugin=plugin, plugin_kws=plugin_kws): + return nifti1image else: - not_available = True - if not_available: - warnings.warn("Failed. Given plugin not available, please install local plugin or use from available on " - f"remote repository. -> {[p.name for p in fetcher.remote]}", - UserWarning) return None else: scale_mode = scale_mode or 'header' @@ -135,58 +112,75 @@ def get_nifti1image(scanobj: 'Scan', reco_id=reco_id, subj_type=subj_type, subj_position=subj_position) - header = BaseMethods.get_nifti1header(scanobj=scanobj, - reco_id=reco_id, - scale_mode=scale_mode) + return BaseMethods._assemble_nifti1image(scanobj=scanobj, dataobj=dataobj, + affine=affine, scale_mode=scale_mode) + @staticmethod + def _bypass_method_via_plugin(scanobj: 'Scan', + subj_type: Optional[str] = None, + subj_position: Optional[str] = None, + plugin: Optional[Union['PlugInSnippet', str]] = None, + plugin_kws: Optional[dict] = None) -> Optional[Nifti1Image]: + if isinstance(plugin, str): + plugin = BaseMethods._get_plugin_snippets_by_name(plugin) + if isinstance(plugin, PlugInSnippet) and plugin.type == 'tonifti': + print(f'++ Installed PlugIn: {plugin}') + with plugin.set(pvobj=scanobj.pvobj, **plugin_kws) as p: + nifti1image = p.get_nifti1image(subj_type=subj_type, subj_position=subj_position) + return nifti1image + else: + fetcher = config.get_fetcher('plugin') + warnings.warn("Failed. The given plugin is not available; " + "please install a local plugin or use one available on the " + f"remote repository. -> {[p.name for p in fetcher.remote]}", + UserWarning) + return None + + @staticmethod + def _get_plugin_snippets_by_name(plugin: str): + fetcher = config.get_fetcher('plugin') + if not fetcher.is_cache: + plugin = BaseMethods._filter_snippets_by_name(plugin, fetcher.local) + if fetcher.is_cache or not isinstance(plugin, PlugInSnippet): + plugin = BaseMethods._filter_snippets_by_name(plugin, fetcher.remote) + return plugin + + @staticmethod + def _filter_snippets_by_name(name: str, snippets: list): + if filtered := [s for s in snippets if s.name == name]: + return filtered[0] + else: + return name + + @staticmethod + def _assemble_nifti1image(scanobj: 'Scan', + dataobj: NDArray, + affine: NDArray, + scale_mode: Optional[Literal['header', 'apply']] = None): if isinstance(dataobj, list): # multi-dataobj (e.g. msme) - affine = affine if isinstance(affine, list) else [affine for _ in range(len(dataobj))] - return [Nifti1Image(dataobj=dobj, affine=affine[i], header=header) for i, dobj in enumerate(dataobj)] + niis = BaseMethods._assemble_msme(dataobj, affine) + return [BaseMethods.update_nifti1header(nifti1image=nii, + scanobj=scanobj, + scale_mode=scale_mode) for nii in niis] if isinstance(affine, list): # multi-slicepacks - return [Nifti1Image(dataobj[:,:,i,...], affine=aff, header=header) for i, aff in enumerate(affine)] + niis = BaseMethods._assemble_ms(dataobj, affine) + return niis + nii = Nifti1Image(dataobj=dataobj, affine=affine) + return BaseMethods.update_nifti1header(nifti1image=nii, + scanobj=scanobj, + scale_mode=scale_mode) - - -class BasePlugin(Scan, BaseMethods): - """Base class for handling plugin operations, integrating scanning and basic method functionalities. - - This class initializes plugin operations with options for verbose output and integrates functionalities - from the Scan and BaseMethods classes. It provides methods to close the plugin and clear any cached data. 
- - Args: - pvobj (Union['PvScan', 'PvReco', 'PvFiles']): An object representing the PV (ParaVision) scan, reconstruction, - or file data, which is central to initializing the plugin operations. - verbose (bool): Flag to enable verbose output during operations, defaults to False. - **kwargs: Additional keyword arguments that are passed to the superclass. - - Attributes: - verbose (bool): Enables or disables verbose output. - """ - def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], - verbose: bool=False, **kwargs): - """Initializes the BasePlugin with a PV object, optional verbosity, and other parameters. + @staticmethod + def _assemble_msme(dataobj: NDArray, affine: NDArray): + affine = affine if isinstance(affine, list) else [affine for _ in range(len(dataobj))] + return [Nifti1Image(dataobj=dobj, affine=affine[i]) for i, dobj in enumerate(dataobj)] - Args: - pvobj (Union['PvScan', 'PvReco', 'PvFiles']): The primary object associated with ParaVision operations. - verbose (bool, optional): If True, enables verbose output. Defaults to False. - **kwargs: Arbitrary keyword arguments passed to the superclass initialization. - """ - super().__init__(pvobj, **kwargs) - self.verbose = verbose + @staticmethod + def _assemble_ms(dataobj: NDArray, affine: NDArray): + return [Nifti1Image(dataobj=dataobj[:,:,i,...], affine=aff) for i, aff in enumerate(affine)] - def close(self): - """Closes the plugin and clears any associated caches by invoking the clear_cache method. - """ - super().close() - self.clear_cache() - - def clear_cache(self): - """Clears all cached data associated with the plugin. This involves deleting files that have been - cached during plugin operations. - """ - for buffer in self._buffers: - file_path = Path(buffer.name) - if file_path.exists(): - file_path.unlink() + def list_plugin(self): + avail_dict = self.config.avail('plugin') + return {'local': [s for s in avail_dict['local'] if s.type == 'tonifti'], + 'remote': [s for s in avail_dict['remote'] if s.type == 'tonifti']} \ No newline at end of file diff --git a/brkraw/app/tonifti/header.py b/brkraw/app/tonifti/header.py index 7acd992..79dfe55 100644 --- a/brkraw/app/tonifti/header.py +++ b/brkraw/app/tonifti/header.py @@ -4,7 +4,7 @@ from __future__ import annotations import warnings -from nibabel.nifti1 import Nifti1Header +from nibabel.nifti1 import Nifti1Image from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Optional, Literal @@ -12,12 +12,18 @@ class Header: - def __init__(self, scaninfo: 'ScanInfo', + info: ScanInfo + scale_mode: int + nifti1image: 'Nifti1Image' + + def __init__(self, + scaninfo: 'ScanInfo', + nifti1image: 'Nifti1Image', scale_mode: Optional[Literal['header', 'apply']] = None): self.info = scaninfo self.scale_mode = 1 if scale_mode == 'header' else 0 - self.nifti1header = Nifti1Header() - self.nifti1header.default_x_flip = False + self.nifti1image = nifti1image + self.nifti1image.header.default_x_flip = False self._set_scale_params() self._set_sliceorder() self._set_time_step() @@ -44,19 +50,25 @@ def _set_sliceorder(self): "Failed to identify compatible 'slice_code'. " "Please use this header information with care in case slice timing correction is needed." 
) - self.nifti1header['slice_code'] = slice_code - + self.nifti1image.header['slice_code'] = slice_code + def _set_time_step(self): + xyzt_unit = {'cycle':('mm', 'sec')} if self.info.cycle['num_cycles'] > 1: - time_step = self.info.cycle['time_step'] - self.nifti1header['pixdim'][4] = time_step + time_step = self.info.cycle['time_step'] / 1000 + self.nifti1image.header['pixdim'][4] = time_step num_slices = self.info.slicepack['num_slices_each_pack'][0] - self.nifti1header['slice_duration'] = time_step / num_slices + self.nifti1image.header['slice_duration'] = time_step / num_slices + self.nifti1image.header.set_xyzt_units(*xyzt_unit['cycle']) def _set_scale_params(self): if self.scale_mode: - self.nifti1header['scl_slope'] = self.info.dataarray['slope'] - self.nifti1header['scl_inter'] = self.info.dataarray['offset'] + self.nifti1image.header.set_slope_inter(slope=self.info.dataarray['slope'], + inter=self.info.dataarray['offset']) + self._update_dtype() + + def _update_dtype(self): + self.nifti1image.header.set_data_dtype(self.nifti1image.dataobj.dtype) def get(self): - return self.nifti1header \ No newline at end of file + return self.nifti1image \ No newline at end of file diff --git a/brkraw/app/tonifti/plugin.py b/brkraw/app/tonifti/plugin.py new file mode 100644 index 0000000..191d17a --- /dev/null +++ b/brkraw/app/tonifti/plugin.py @@ -0,0 +1,54 @@ +from __future__ import annotations +from pathlib import Path +from .base import BaseMethods +from brkraw.api.data import Scan +from brkraw.api.pvobj import PvScan, PvReco, PvFiles +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Union + + +class ToNiftiPlugin(Scan, BaseMethods): + """Base class for handling plugin operations, integrating scanning and basic method functionalities. + + This class initializes plugin operations with options for verbose output and integrates functionalities + from the Scan and BaseMethods classes. It provides methods to close the plugin and clear any cached data. + + Args: + pvobj (Union['PvScan', 'PvReco', 'PvFiles']): An object representing the PV (ParaVision) scan, reconstruction, + or file data, which is central to initializing the plugin operations. + verbose (bool): Flag to enable verbose output during operations, defaults to False. + **kwargs: Additional keyword arguments that are passed to the superclass. + + Attributes: + verbose (bool): Enables or disables verbose output. + """ + def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], + verbose: bool=False, + skip_dependency_check: bool=False, + **kwargs): + """Initializes the BasePlugin with a PV object, optional verbosity, and other parameters. + + Args: + pvobj (Union['PvScan', 'PvReco', 'PvFiles']): The primary object associated with ParaVision operations. + verbose (bool, optional): If True, enables verbose output. Defaults to False. + **kwargs: Arbitrary keyword arguments passed to the superclass initialization. + """ + super().__init__(pvobj, **kwargs) + self.verbose: bool = verbose + self.skip_dependency_check: bool = skip_dependency_check + + def close(self): + """Closes the plugin and clears any associated caches by invoking the clear_cache method. + """ + super().close() + self.clear_cache() + + def clear_cache(self): + """Clears all cached data associated with the plugin. This involves deleting files that have been + cached during plugin operations. 
+ """ + for buffer in self._buffers: + file_path = Path(buffer.name) + if file_path.exists(): + file_path.unlink() diff --git a/brkraw/app/tonifti/scan.py b/brkraw/app/tonifti/scan.py index f4d71aa..c029157 100644 --- a/brkraw/app/tonifti/scan.py +++ b/brkraw/app/tonifti/scan.py @@ -8,6 +8,7 @@ if TYPE_CHECKING: from typing import Union, Optional, Literal from brkraw.api import PlugInSnippet + from nibabel.nifti1 import Nifti1Image class ScanToNifti(Scan, BaseMethods): @@ -36,7 +37,6 @@ def __init__(self, pvobj = PvFiles(*paths) super().__init__(pvobj=pvobj, reco_id=pvobj._reco_id) - @staticmethod def _construct_pvscan(path: 'Path', contents: 'OrderedDict') -> 'PvScan': ref_paths = (path.parent, path.name) @@ -111,11 +111,12 @@ def get_affine_dict(self, reco_id: Optional[int] = None, subj_type = subj_type, subj_position = subj_position) - def get_nifti1header(self, - reco_id: Optional[int] = None, - scale_mode: Optional[Literal['header', 'apply']] = None): + def update_nifti1header(self, + nifti1obj: 'Nifti1Image', + reco_id: Optional[int] = None, + scale_mode: Optional[Literal['header', 'apply']] = None): scale_mode = scale_mode or self.scale_mode - return super().get_nifti1header(self, reco_id, scale_mode) + return super().update_nifti1header(self, nifti1obj, reco_id, scale_mode) def get_nifti1image(self, reco_id: Optional[int] = None, diff --git a/brkraw/app/tonifti/study.py b/brkraw/app/tonifti/study.py index 38e0b4c..c310c4e 100644 --- a/brkraw/app/tonifti/study.py +++ b/brkraw/app/tonifti/study.py @@ -8,6 +8,7 @@ from typing import Optional, Literal, Union from pathlib import Path from brkraw.api import PlugInSnippet + from nibabel.nifti1 import Nifti1Header class StudyToNifti(Study, BaseMethods): @@ -76,15 +77,17 @@ def get_affine_dict(self, subj_type=subj_type, subj_position=subj_position) - def get_nifti1header(self, - scan_id: int, - reco_id: Optional[int] = None, - scale_mode: Optional[Literal['header', 'apply']] = None): + def update_nifti1header(self, + nifti1image: 'Nifti1Header', + scan_id: int, + reco_id: Optional[int] = None, + scale_mode: Optional[Literal['header', 'apply']] = None): scale_mode = scale_mode or self.scale_mode scanobj = self.get_scan(scan_id=scan_id, reco_id=reco_id) - return super().get_nifti1header(scanobj=scanobj, - scale_mode=scale_mode) + return super().update_nifti1header(scanobj=scanobj, + nifti1image=nifti1image, + scale_mode=scale_mode) def get_nifti1image(self, scan_id: int, @@ -104,4 +107,24 @@ def get_nifti1image(self, subj_position=subj_position, plugin=plugin, plugin_kws=plugin_kws) - \ No newline at end of file + + @property + def info(self): + # scan cycle + header = super().info['header'] + scans = super().info['scans'] + title = header['sw_version'] + date = header['date'] + print(title) + print('-' * len(title)) + print('date: {date}') + for key, value in header.items(): + if key not in ['date', 'sw_version']: + print(f'{key}:\t{value}') + print('\n[ScanID]\tMethod::Protocol') + max_size = len(str(max(scans.keys()))) + + for scan_id, value in scans.items(): + print(f"[{str(scan_id).zfill(max_size)}]\t{value['method']}::{value['protocol']}") + if 'recos' in value and value['recos']: + print('\tRECO:', list(value['recos'].keys())) \ No newline at end of file diff --git a/brkraw/app/tonifti/types.py b/brkraw/app/tonifti/types.py new file mode 100644 index 0000000..2fbaa92 --- /dev/null +++ b/brkraw/app/tonifti/types.py @@ -0,0 +1,18 @@ +from typing import Type, Literal, Optional, Union +from .plugin import ToNiftiPlugin +from .scan import 
ScanToNifti +from .study import StudyToNifti + + +ToNiftiPluginType = Type[ToNiftiPlugin] + +ScanToNiftiType = Type[ScanToNifti] + +StudyToNiftiType = Type[StudyToNifti] + +ToNiftiObject = Type[Union[ToNiftiPlugin, ScanToNifti, StudyToNifti]] + +ScaleMode = Type[Optional[Literal['header', 'apply']]] + +__all__ = ['ToNiftiPlugin', 'ScanToNifti', 'StudyToNifti'] + diff --git a/brkraw/app/viewer/__init__.py b/brkraw/app/viewer/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/brkraw/app/viewer/config.py b/brkraw/app/viewer/config.py new file mode 100644 index 0000000..48cafcc --- /dev/null +++ b/brkraw/app/viewer/config.py @@ -0,0 +1,17 @@ +import sys +if sys.platform == 'darwin': + font = 'arial 14' + button_size = 10 +else: + font = 'arial 10' + button_size = 12 +win_pre_width = 250 +win_pst_width = 1050 +win_pre_height = 40 +win_pst_height = 680 + +window_posx = 100 +window_posy = 100 + +viewer_width = 400 +viewer_height = 400 \ No newline at end of file diff --git a/brkraw/app/viewer/main_win.py b/brkraw/app/viewer/main_win.py new file mode 100644 index 0000000..97b8f66 --- /dev/null +++ b/brkraw/app/viewer/main_win.py @@ -0,0 +1,215 @@ +import tkinter as tk +from tkinter import filedialog +from brkraw import __version__, load +from .scan_list import ScanList +from .scan_info import ScanInfo +from .subj_info import SubjInfo +from .previewer import Previewer +from .config import win_pre_width as _width, win_pre_height as _height +from .config import win_pst_width, win_pst_height +from .config import window_posx, window_posy + +class MainWindow(tk.Tk): + def __init__(self, *args, **kwargs): + super(MainWindow, self).__init__(*args, **kwargs) + self._raw = None + self._ignore_slope = False + self._ignore_offset = False + self._scan_id = None + self._reco_id = None + self._output = None + self.title('BrkRaw GUI - v{}'.format(__version__)) + + # initiated windows size and location + self.geometry('{}x{}+{}+{}'.format(_width, _height, + window_posx, window_posy)) + # minimal size + self.minsize(_width, _height) + self.maxsize(_width, _height) + + self._init_layout() + + def open_filediag(self): + self._path = filedialog.askopenfilename( + initialdir = ".", + title = "Select file", + filetypes = (("Zip compressed", "*.zip"), + ("Paravision 6 format", "*.PVdatasets"), + ("Paravision 360 format", "*.PvDatasets") + )) + self._extend_layout() + self._load_dataset() + + def open_dirdiag(self): + self._path = filedialog.askdirectory( + initialdir = ".", + title = "Select directory") + self._extend_layout() + self._load_dataset() + + def _init_layout(self): + # level 1 + self._subj_info = SubjInfo(self) + self._subj_info.pack( + side=tk.TOP, fill=tk.X, anchor=tk.CENTER) + + # Button binding + self._subj_info._loadfile.config(command=self.open_filediag) + self._subj_info._loaddir.config(command=self.open_dirdiag) + + def _close(self): + if self._raw != None: + self.geometry('{}x{}+{}+{}'.format(_width, _height, + window_posx, window_posy)) + + # close opened frames + self._subj_info._clean_path() + self._subj_info._main_frame.destroy() + self._subj_info._path.destroy() + self._subj_info._path_label.destroy() + # self._subj_info._close.destroy() + self._subj_info._refresh.destroy() + self._main_frame.destroy() + + self._raw.close() + self._raw = None + + # minimal size + self.minsize(_width, _height) + self.maxsize(_width, _height) + + def _extend_layout(self): + # Change windows size + self._close() + if len(self._path) != 0: + self.geometry('{}x{}+{}+{}'.format(win_pst_width, 
win_pst_height, + window_posx, window_posy)) + self.minsize(win_pst_width, win_pst_height) + self.maxsize(win_pst_width, win_pst_height) + + # extend level 1 + self._subj_info._extend_layout() + # self._subj_info._close.config(command=self._close) + self._subj_info._refresh.config(command=self._refresh) + + self._main_frame = tk.Frame(self) + self._main_frame.pack( + side=tk.BOTTOM, fill=tk.BOTH, expand=True) + + # level 2 + self._scan_list = ScanList(self._main_frame) + view_frame = tk.Frame(self._main_frame) + self._scan_list.pack( + side=tk.LEFT, fill=tk.BOTH) + view_frame.pack( + side=tk.LEFT, fill=tk.BOTH, expand=True) + + # level 3 + self._scan_info = ScanInfo(view_frame) + self._preview = Previewer(view_frame) + self._preview.pack( + side=tk.LEFT, fill=tk.BOTH, expand=True) + self._scan_info.pack( + side=tk.LEFT, fill=tk.BOTH, padx=10, pady=10) + self._bind_scanlist() + self._set_convert_button() + + def _refresh(self): + self._close() + self._extend_layout() + self._load_dataset() + + def _load_dataset(self): + if len(self._path) != 0: + self._raw = load(self._path) + self._init_update() + + def _init_update(self): + # take the first image from the dataset + self._scan_id, recos = list(self._raw._avail.items())[0] + + self._reco_id = recos[0] + # update subject info + self._subj_info.load_data(self._raw) + + # update scan and reco listbox + self._scan_list.load_data(self._raw) + self._scan_list._update_recos(self._raw, self._scan_id) + + # update scan info of first image + self._scan_info.load_data(self._raw, self._scan_id, self._reco_id) + + # update preview of first image + self._preview.load_data(self._raw, self._scan_id, self._reco_id) + + def _bind_scanlist(self): + self._scan_list._scanlist.bind('<<ListboxSelect>>', self._update_scanid) + self._scan_list._recolist.bind('<<ListboxSelect>>', self._update_recoid) + + def _update_scanid(self, event): + w = event.widget + index = int(w.curselection()[0]) + self._scan_id = self._raw._pvobj.avail_scan_id[index] + self._reco_id = self._raw._avail[self._scan_id][0] + self._scan_list._update_recos(self._raw, self._scan_id) + self._update_data() + + def _update_recoid(self, event): + w = event.widget + index = int(w.curselection()[0]) + self._reco_id = self._raw._avail[self._scan_id][index] + self._update_data() + + def _update_data(self): + # update scan info of the selected image + self._scan_info.load_data(self._raw, self._scan_id, self._reco_id) + # update preview of the selected image + self._preview.load_data(self._raw, self._scan_id, self._reco_id) + + def _set_convert_button(self): + self._scan_list._updt_bt.config(state=tk.NORMAL) + self._scan_list._conv_bt.config(state=tk.NORMAL) + self._scan_list._updt_bt.config(command=self._set_output) + self._scan_list._conv_bt.config(command=self._save_as) + + def _set_output(self): + self._output = filedialog.askdirectory(initialdir=self._output, + title="Select Output Directory") + + def _save_as(self): + date = self._raw.get_scan_time()['date'].strftime("%y%m%d") + pvobj = self._raw._pvobj + acqp = self._raw.get_acqp + this_acqp = acqp(self._scan_id) + scan_name = this_acqp.parameters['ACQ_scan_name'] + scan_name = scan_name.replace(' ','-') + filename = '{}_{}_{}_{}_{}_{}_{}'.format(date, + pvobj.subj_id, + pvobj.session_id, + pvobj.study_id, + self._scan_id, + self._reco_id, + scan_name) + if self._ignore_slope: + slope = None + else: + slope = False + if self._ignore_offset: + offset = None + else: + offset = False + self._raw.save_as(self._scan_id, self._reco_id, filename, + dir=self._output, 
slope=slope, offset=offset) + method = self._raw._pvobj._method[self._scan_id].parameters['Method'] + import re + if re.search('dti', method, re.IGNORECASE): + self._raw.save_bdata(self._scan_id, filename) + from tkinter import messagebox + messagebox.showinfo(title='File conversion', + message='{}/{}.nii.gz has been converted'.format(self._output, + filename)) + + +if __name__ == '__main__': + root = MainWindow() + root.mainloop() diff --git a/brkraw/app/viewer/previewer.py b/brkraw/app/viewer/previewer.py new file mode 100644 index 0000000..4e42150 --- /dev/null +++ b/brkraw/app/viewer/previewer.py @@ -0,0 +1,225 @@ +import tkinter as tk +from PIL import Image, ImageTk +import numpy as np +from .config import viewer_width, viewer_height + + +class Previewer(tk.Frame): + def __init__(self, *args, **kwargs): + super(Previewer, self).__init__(*args, **kwargs) + # variables + self._dataobj = None + self._imgobj = None + self._is_tripilot = False + self._current_slice = 0 + self._current_frame = 0 + + self.tkimg = None + self.slice_axis = tk.IntVar() + self.slice_axis.set(99) + + self._set_axisbuttons() + self._set_canvas() + self._set_sliders() + + def _set_canvas(self): + self._canvas = tk.Canvas(self, + width=viewer_width, + height=viewer_height) + self._canvas.place(x=50, y=30) + + def _set_axisbuttons(self): + self._axis_buttons = [] + + tk.Label(self, text='Slice Axis::').place(x=50, y=5) + for i, axis in enumerate(['x', 'y', 'z']): + button = tk.Radiobutton(self, + text=axis, + padx=10, + variable=self.slice_axis, + command=self._change_sliceaxis, + value=i) + button.place(x=150 + i*50, y=5) + + if self.slice_axis.get() == 99: + button['state'] = 'disabled' + self._axis_buttons.append(button) + + def _set_sliders(self, n_slice=0, n_frame=0): + + tk.Label(self, text='Slice').place(x=70, y=455) + tk.Label(self, text='Frame').place(x=70, y=495) + self.slice_slider = tk.Scale(self, from_=0, to=n_slice - 1, + orient=tk.HORIZONTAL, + command=self._change_slice, length=300) + + self.frame_slider = tk.Scale(self, from_=0, to=n_frame - 1, + orient=tk.HORIZONTAL, + command=self._change_frame, length=300) + + self.slice_slider.set(self._current_slice) + self.frame_slider.set(self._current_frame) + self.slice_slider.place(x=130, y=440) + self.frame_slider.place(x=130, y=480) + + if n_slice == 0: + self.slice_slider.config(state=tk.DISABLED) + if n_frame == 0: + self.frame_slider.config(state=tk.DISABLED) + + def update_image(self): + self._canvas.create_image((int(viewer_width / 2), int(viewer_height / 2)), + image=self.tkimg) + + def _load_image(self, brkraw_obj, scan_id, reco_id): + from ..lib.utils import multiply_all + # update image when scan_id and reco_id is changed + visu_pars = brkraw_obj._get_visu_pars(scan_id, reco_id) + dataobj = brkraw_obj.get_dataobj(scan_id, reco_id, slope=False) + + if len(dataobj.shape) > 3: + x, y, z = dataobj.shape[:3] + f = multiply_all(dataobj.shape[3:]) + # all converted nifti must be 4D + self._dataobj = dataobj.reshape([x, y, z, f])[:,:,::-1, ...] + else: + self._dataobj = dataobj + + # shape = brkraw_obj._get_matrix_size(visu_pars, dataobj) + # self._dataobj = dataobj.reshape(shape[::-1]).T[:,:,::-1, ...] 
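+ # The reshape above collapses every dimension beyond the third into a single
+ # frame axis so the viewer always works with a 4D array; e.g. a hypothetical
+ # (128, 128, 30, 3, 5) dataobj becomes (128, 128, 30, 15). The [:, :, ::-1, ...]
+ # indexing flips the slice axis so slices are displayed in the expected order.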
+ + n_slicepacks = brkraw_obj._get_slice_info(visu_pars)['num_slice_packs'] + spatial_info = brkraw_obj._get_spatial_info(visu_pars) + + self._resol = spatial_info['spatial_resol'] + self._matrix_size = spatial_info['matrix_size'] + + if n_slicepacks > 1: + self._is_tripilot = True + else: + self._is_tripilot = False + + def _change_sliceaxis(self): + if self.slice_axis.get() in range(3): + self._imgobj = np.swapaxes(self._dataobj, axis1=self.slice_axis.get(), axis2=2) + + shape = self._imgobj.shape + if len(shape) > 3: + n_frame = shape[3] + else: + n_frame = 0 + n_slice = shape[2] + + self._current_slice = int(n_slice / 2) + self._current_frame = 0 + + self._set_sliders(n_slice, n_frame) + + def _convert_image(self): + if len(self._imgobj.shape) > 3: + img = self._imgobj[:,:,self._current_slice,self._current_frame] + else: + img = self._imgobj[:,:,self._current_slice] + + slice_axis = self.slice_axis.get() + if slice_axis in range(3): + axis_ref = np.array([0, 1, 2]) + axis_ref[slice_axis], axis_ref[2] = axis_ref[2], axis_ref[slice_axis] + + self._img_resol = np.array(self._resol[0])[axis_ref] + self._img_size = np.array(self._matrix_size[0])[axis_ref] + else: + self._img_resol = np.array(self._resol[0]) + self._img_size = np.array(self._matrix_size[0]) + + img_fov = self._img_resol.astype(float) * self._img_size.astype(float) + max_val = img_fov[:2].max() + img_fov /= max_val + img_fov *= 400 + + # check resolution + img_width, img_height = int(img_fov[0]), int(img_fov[1]) + + self.tkimg = self.convert_pil2tk(self.convert_npy2pil(img), + img_width, img_height) + + def _change_slice(self, event): + self._current_slice = self.slice_slider.get() + self._convert_image() + self.update_image() + + def _change_frame(self, event): + self._current_frame = self.frame_slider.get() + self._convert_image() + self.update_image() + + def load_data(self, brkraw_obj, scan_id, reco_id): + # load image from dataset + self._load_image(brkraw_obj, scan_id, reco_id) + shape = self._dataobj.shape + if len(shape) > 3: + n_frame = shape[3] + else: + n_frame = 0 + n_slice = shape[2] + + self._current_slice = int(n_slice/2) + self._current_frame = 0 + + if self._is_tripilot: + self.slice_axis.set(99) + for button in self._axis_buttons: + button['state'] = 'disabled' + else: + for button in self._axis_buttons: + button['state'] = 'normal' + self.slice_axis.set(2) + self._set_sliders(n_slice, n_frame) + self._imgobj = self._dataobj + self._convert_image() + self.update_image() + + @staticmethod + def convert_npy2pil(data, mode=None, rescale=True): + """ convert 2D numpy.array to PIL.Image object + + Args: + data: 2D array data + mode: mode of image object + link=https://pillow.readthedocs.io/en/latest/handbook/concepts.html#modes + rescale: rescale value to 0~255 + + Returns: PIL.Image object + + """ + if rescale: + rescaled_data = data / data.max() * 255 + else: + rescaled_data = data + rescaled_data = rescaled_data.astype('uint8') + return Image.fromarray(rescaled_data.T, mode=mode) + + @staticmethod + def convert_pil2tk(pilobj, width, height, method='nearest'): + """ convert PIL.Image object to tkinter.PhotoImage object + This will allow plotting image on Tk.Canvas + + Args: + pilobj: 2D image object + width: width of the image + height: height of the image + method: Method for interpolation + + Returns: TkImage object + + """ + if method == 'nearest': + method = Image.NEAREST + else: +
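# Image.ANTIALIAS was an alias of Image.LANCZOS and was removed in Pillow 10; + # the smooth-resize fallback therefore selects LANCZOS directly +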
method = Image.LANCZOS + return ImageTk.PhotoImage(pilobj.resize((width, height), method)) \ No newline at end of file diff --git a/brkraw/app/viewer/scan_info.py b/brkraw/app/viewer/scan_info.py new file mode 100644 index 0000000..3a5bb91 --- /dev/null +++ b/brkraw/app/viewer/scan_info.py @@ -0,0 +1,72 @@ +import tkinter as tk +from .config import font + + +class ScanInfo(tk.Frame): + def __init__(self, *args, **kwargs): + super(ScanInfo, self).__init__(*args, **kwargs) + self.title = tk.Label(self, text='Selected Scan Info') + self.title.pack(side=tk.TOP, fill=tk.X) + self.textbox = tk.Text(self, width=30) + self.textbox.pack(side=tk.TOP, fill=tk.BOTH, expand=True) + self.textbox.configure(font=font) + + def load_data(self, brkraw_obj, scan_id, reco_id): + from brkraw.lib.utils import get_value, is_all_element_same + visu_pars = brkraw_obj._get_visu_pars(scan_id, reco_id) + self.textbox.config(state=tk.NORMAL) + self.textbox.delete('1.0', tk.END) + + # RepetitionTime + tr = get_value(visu_pars, 'VisuAcqRepetitionTime') + tr = ','.join(map(str, tr)) if isinstance(tr, list) else tr + # EchoTime + te = get_value(visu_pars, 'VisuAcqEchoTime') + te = 0 if te is None else te + te = ','.join(map(str, te)) if isinstance(te, list) else te + # PixelBandwidth + pixel_bw = get_value(visu_pars, 'VisuAcqPixelBandwidth') + # FlipAngle + flip_angle = get_value(visu_pars, 'VisuAcqFlipAngle') + # Sequence and Protocol names + sequence_name = get_value(visu_pars, 'VisuAcqSequenceName') + protocol_name = get_value(visu_pars, 'VisuAcquisitionProtocol') + acqpars = brkraw_obj.get_acqp(int(scan_id)) + scan_name = acqpars._parameters['ACQ_scan_name'] + # Dimension + dim = brkraw_obj._get_dim_info(visu_pars)[0] + # MatrixSize + size = brkraw_obj._get_matrix_size(visu_pars) + size = ' x '.join(map(str, size)) + # FOV size and resolution + spatial_info = brkraw_obj._get_spatial_info(visu_pars) + temp_info = brkraw_obj._get_temp_info(visu_pars) + s_resol = spatial_info['spatial_resol'] + fov_size = spatial_info['fov_size'] + fov_size = ' x '.join(map(str, fov_size)) + s_unit = spatial_info['unit'] + t_resol = '{0:.3f}'.format(temp_info['temporal_resol']) + t_unit = temp_info['unit'] + s_resol = list(s_resol[0]) if is_all_element_same(s_resol) else s_resol + s_resol = ' x '.join(['{0:.3f}'.format(r) for r in s_resol]) + # Number of slice packs + n_slicepacks = brkraw_obj._get_slice_info(visu_pars)['num_slice_packs'] + + # Printing out + self.textbox.insert(tk.END, 'Sequence:\n - {}\n'.format(sequence_name)) + self.textbox.insert(tk.END, 'Protocol:\n - {}\n'.format(protocol_name)) + self.textbox.insert(tk.END, 'Scan Name:\n - {}\n'.format(scan_name)) + self.textbox.insert(tk.END, 'RepetitionTime:\n - {} msec\n'.format(tr)) + self.textbox.insert(tk.END, 'EchoTime:\n - {} msec\n'.format(te)) + self.textbox.insert(tk.END, 'FlipAngle:\n - {} degree\n\n'.format(flip_angle)) + if isinstance(pixel_bw, float): + self.textbox.insert(tk.END, 'PixelBandwidth:\n - {0:.3f} Hz\n'.format(pixel_bw)) + else: + self.textbox.insert(tk.END, 'PixelBandwidth:\n - {} Hz\n'.format(pixel_bw)) + self.textbox.insert(tk.END, 'Dimension:\n - {}D\n'.format(dim)) + self.textbox.insert(tk.END, 'Matrix size:\n - {}\n'.format(size)) + self.textbox.insert(tk.END, 'Number of SlicePacks:\n - {}\n'.format(n_slicepacks)) + self.textbox.insert(tk.END, 'FOV size:\n - {} (mm)\n\n'.format(fov_size)) + self.textbox.insert(tk.END, 'Spatial resolution:\n - {} ({})\n'.format(s_resol, s_unit)) + self.textbox.insert(tk.END, 'Temporal resolution:\n - {}
({})\n'.format(t_resol, t_unit)) + self.textbox.config(state=tk.DISABLED) \ No newline at end of file diff --git a/brkraw/app/viewer/scan_list.py b/brkraw/app/viewer/scan_list.py new file mode 100644 index 0000000..39ae206 --- /dev/null +++ b/brkraw/app/viewer/scan_list.py @@ -0,0 +1,73 @@ +import tkinter as tk +from .config import font + + +class ScanList(tk.Frame): + def __init__(self, *args, **kwargs): + super(ScanList, self).__init__(*args, **kwargs) + self._init_scanlist() + self._init_recolist() + self._init_buttons() + + def _init_scanlist(self): + self._scanlist_label = tk.Label(self, text='Scan ID / Protocol') + self._scanlist_label.pack(side=tk.TOP, fill=tk.X, pady=5) + self._scanlist_frame = tk.Frame(self) + self._scanlist_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True, padx=10) + self._scanlist = tk.Listbox(self._scanlist_frame, width=30, + exportselection=False) + self._scanlist.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) + self._set_scrollbar(self._scanlist_frame, self._scanlist) + self._scanlist.config(font=font, state=tk.DISABLED) + self._scanlist_label.config(font=font) + + def _init_recolist(self): + self._recolist_label = tk.Label(self, text='Reco ID / DataType') + self._recolist_label.pack(side=tk.TOP, fill=tk.X, pady=5) + self._recolist_frame = tk.Frame(self, height=5) + self._recolist_frame.pack(side=tk.TOP, fill=tk.BOTH, padx=10) + self._recolist = tk.Listbox(self._recolist_frame, width=30, height=5, + exportselection=False) + self._recolist.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) + self._set_scrollbar(self._recolist_frame, self._recolist) + self._recolist.config(font=font, state=tk.DISABLED) + self._recolist_label.config(font=font) + + def _init_buttons(self): + self._button_fm = tk.Frame(self) + self._button_fm.pack(side=tk.TOP, fill=tk.X) + self._updt_bt = tk.Button(self._button_fm, text='SetOutput') + self._conv_bt = tk.Button(self._button_fm, text='Convert') + self._updt_bt.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) + self._conv_bt.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) + self._updt_bt.config(state=tk.DISABLED, font=font) + self._conv_bt.config(state=tk.DISABLED, font=font) + + @staticmethod + def _set_scrollbar(frame, listbox_obj): + scrollbar = tk.Scrollbar(frame, orient=tk.VERTICAL) + scrollbar.config(command=listbox_obj.yview) + scrollbar.pack(side=tk.RIGHT, fill="y") + listbox_obj.config(yscrollcommand=scrollbar.set) + + def load_data(self, brkraw_obj): + from brkraw.lib.utils import get_value + self._scanlist.config(state=tk.NORMAL) + for scan_id, recos in brkraw_obj._avail.items(): + visu_pars = brkraw_obj._get_visu_pars(scan_id, recos[0]) + protocol_name = get_value(visu_pars, 'VisuAcquisitionProtocol') + self._scanlist.insert(tk.END, '{}::{}'.format(str(scan_id).zfill(3), + protocol_name)) + self._scanlist.select_set(0) + + def _update_recos(self, brkraw_obj, scan_id): + from brkraw.lib.utils import get_value + self._recolist.config(state=tk.NORMAL) + recos = brkraw_obj._avail[scan_id] + self._recolist.delete(0, tk.END) + for reco_id in recos: + visu_pars = brkraw_obj._get_visu_pars(scan_id, reco_id) + frame_type = get_value(visu_pars, 'VisuCoreFrameType') + self._recolist.insert(tk.END, '{}::{}'.format(str(reco_id).zfill(3), + frame_type)) + self._recolist.select_set(0) diff --git a/brkraw/app/viewer/subj_info.py b/brkraw/app/viewer/subj_info.py new file mode 100644 index 0000000..56105c1 --- /dev/null +++ b/brkraw/app/viewer/subj_info.py @@ -0,0 +1,128 @@ +import tkinter as tk +from .config import font, button_size + + +class
LabelItem(tk.Frame): + def __init__(self, *args, **kwargs): + super(LabelItem, self).__init__(*args, **kwargs) + + def set_label(self, text): + self.label = tk.Label(self, text=text, width=8, anchor=tk.CENTER) + self.entry = tk.Entry(self) + self.label.pack(side=tk.LEFT, fill=tk.X, + anchor=tk.W, ipadx=5) + self.entry.pack(side=tk.LEFT, fill=tk.X, + anchor=tk.W, ipadx=5) + self.label.configure(font=font) + self.entry.config(width=16, font=font) + + def set_entry(self, text): + self.entry.config(state=tk.NORMAL) + self.entry.delete(0, tk.END) + if text is None: + self.entry.insert(tk.END, '') + self.entry.config(state=tk.DISABLED) + else: + self.entry.insert(tk.END, text) + self.entry.config(state="readonly") + + +class SubjInfo(tk.Frame): + def __init__(self, *args, **kwargs): + super(SubjInfo, self).__init__(*args, **kwargs) + self._init_layout() + self.config(padx=10) + + def _init_layout(self): + self._upper_frame = tk.Frame(self) + self._upper_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True, anchor=tk.CENTER) + self._init_upper_frame() + + def _extend_layout(self): + self._path_label = tk.Label(self._upper_frame, text='DataPath', + width=button_size, font=font) + self._path_label.pack(side=tk.LEFT, anchor=tk.E) + # self._close = tk.Button(self._upper_frame, text='Close', + # font=font, width=button_size) + # self._close.pack(side=tk.RIGHT) + self._refresh = tk.Button(self._upper_frame, text='Refresh', + font=font, width=button_size) + self._refresh.pack(side=tk.RIGHT) + self._path = tk.Text(self._upper_frame, height=1, font=font) + self._path.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, anchor=tk.CENTER) + self._path.config(state=tk.DISABLED) + + self._main_frame = tk.Frame(self) + self._main_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True, anchor=tk.CENTER) + self._init_main_frame() + + def _set_path(self, brkraw_obj): + self._path.config(state=tk.NORMAL) + self._path.insert(tk.END, brkraw_obj._pvobj.path) + self._path.config(state=tk.DISABLED) + + def _clean_path(self): + self._path.config(state=tk.NORMAL) + self._path.delete(1.0, tk.END) + self._path.config(state=tk.DISABLED) + + def _init_upper_frame(self): + self._loadfile = tk.Button(self._upper_frame, text='Open File', + font=font, width=button_size) + self._loaddir = tk.Button(self._upper_frame, text='Open Directory', + font=font, width=button_size) + self._loadfile.pack(side=tk.LEFT) + self._loaddir.pack(side=tk.LEFT) + + def _init_main_frame(self): + self._c0 = tk.Frame(self._main_frame) + self._c0.pack(side=tk.LEFT, fill=tk.X, anchor=tk.NW) + self._c1 = tk.Frame(self._main_frame) + self._c1.pack(side=tk.LEFT, fill=tk.X, anchor=tk.NW) + self._c2 = tk.Frame(self._main_frame) + self._c2.pack(side=tk.LEFT, fill=tk.X, anchor=tk.NW) + self._c3 = tk.Frame(self._main_frame) + self._c3.pack(side=tk.LEFT, fill=tk.X, anchor=tk.NW) + self._init_labelitems() + + @staticmethod + def _set_labelitem(frame, label, text=None): + item = LabelItem(frame) + item.pack(side=tk.TOP) + item.set_label(label) + item.set_entry(text) + return item + + def _init_labelitems(self): + self._account = self._set_labelitem(self._c0, 'Account') + self._scandate = self._set_labelitem(self._c0, 'Scan Date') + self._researcher = self._set_labelitem(self._c0, 'Researcher') + self._subjectid = self._set_labelitem(self._c1, 'Subject ID') + self._sessionid = self._set_labelitem(self._c1, 'Session ID') + self._studyid = self._set_labelitem(self._c1, 'Study ID') + self._dob = self._set_labelitem(self._c2, 'DOB') + self._sex = self._set_labelitem(self._c2, 'Sex') +
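        # the weight entry below is displayed with a 'kg' suffix by load_data() +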
self._weight = self._set_labelitem(self._c2, 'Weight') + self._type = self._set_labelitem(self._c3, 'Type') + self._position = self._set_labelitem(self._c3, 'Position') + self._entry = self._set_labelitem(self._c3, 'Entry') + + def load_data(self, brkraw_obj): + try: + datetime = brkraw_obj.get_scan_time() + except Exception: + datetime = dict(date='N/A', start_time='N/A') + pvobj = brkraw_obj._pvobj + self._account.set_entry(pvobj.user_account) + self._researcher.set_entry(pvobj.user_name) + self._scandate.set_entry('{}, {}'.format(datetime['date'], datetime['start_time'])) + self._subjectid.set_entry(pvobj.subj_id) + self._sessionid.set_entry(pvobj.session_id) + self._studyid.set_entry(pvobj.study_id) + self._dob.set_entry(pvobj.subj_dob) + self._sex.set_entry(pvobj.subj_sex) + self._weight.set_entry('{} kg'.format(pvobj.subj_weight)) + self._type.set_entry(pvobj.subj_type) + self._position.set_entry(pvobj.subj_pose) + self._entry.set_entry(pvobj.subj_entry) + self._set_path(brkraw_obj) \ No newline at end of file diff --git a/brkraw/api/config/config.yaml b/brkraw/config.yaml similarity index 52% rename from brkraw/api/config/config.yaml rename to brkraw/config.yaml index 04176d3..265e5bd 100644 --- a/brkraw/api/config/config.yaml +++ b/brkraw/config.yaml @@ -1,26 +1,20 @@ -# default configuration for brkraw -snippets: +plugin: repo: - name: brkraw-snippets url: https://github.com/brkraw/brkraw-snippets.git - plugin: - path: plugin - template: - - myplugin + path: plugin preset: path: preset - template: - - mypreset spec: path: spec recipe: path: recipe - -studyinfo: - recipe: default - + app: tonifti: - output_filename: + output_filename: + recipy: "@brkraw-snippets,better-tonifti:studyinfo" format: ___ - filter: \ No newline at end of file + spec: null + studyinfo: + recipe: brkraw-snippets:studyinfo #:: \ No newline at end of file diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 0000000..42aaab3 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version=3.10 +ignore_missing_imports = True \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index a6de40d..61786a0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,19 +11,28 @@ classifiers = [ ] dependencies = [ 'pyyaml>=6.0.1', - 'nibabel>=3.0.2', 'numpy>=1.18.0', 'tqdm>=4.45.0', - 'pillow>=7.1.1', + 'packaging>=23.1', + 'xnippy==0.0.1' ] description = "Bruker PvDataset Loader" license = {text = "GNLv3"} dynamic = ["version"] -maintainers = [{name = "SungHo Lee", email = 'shlee@unc.edu'}] +maintainers = [ + {name = "SungHo Lee", email = 'shlee@unc.edu'} + ] name = "brkraw" readme = "README.md" requires-python = ">=3.7" -keywords = ['bruker', 'data_handler', 'converter', 'administrator_tool'] +keywords = [ + 'bruker', + 'data_handler', + 'converter', + 'administrator_tool', + 'extensible', + 'xoani' + ] [project.urls] Homepage = "https://brkraw.github.io" @@ -33,15 +42,23 @@ legacy = [ 'pandas>=1.0.0', 'openpyxl>=3.0.3', 'xlrd>=1.0.0', - 'SimpleITK>=1.2.4', -] + 'SimpleITK>=1.2.4' + ] + +tonifti = [ + 'nibabel>=3.0.2' + ] + +viewer = [ + 'pillow>=7.1.1' + ] dev = [ "flake8", "pytest", "nbmake", "types-PyYAML" -] + ] [tool.hatch.version] path = "brkraw/__init__.py" diff --git a/tests/01_api_pvobj_test.py b/tests/01_api_pvobj_test.py new file mode 100644 index 0000000..161511e --- /dev/null +++ b/tests/01_api_pvobj_test.py @@ -0,0 +1,19 @@ +def test_loading(dataset): + scan_contents = ['method', 'acqp'] + reco_contents = ['2dseq', 'visu_pars', 'reco'] + + for v, subset in 
dataset.items(): + print(f'- v{v}:') + for fname, rawobj in subset.items(): + print(f' + testing {fname}') + for scan_id in rawobj.avail: + scanobj = rawobj.get_scan(scan_id) + failed = sum([int(f in scan_contents) for f in scanobj._contents['files']]) < len(scan_contents) + if failed: + print(f' - [{scan_id}] object does not contain all {scan_contents}') + else: + for reco_id in scanobj.avail: + recoobj = scanobj.get_reco(reco_id) + failed = sum([int(f in reco_contents) for f in recoobj.contents['files']]) < len(reco_contents) + if failed: + print(f' - [{scan_id}][{reco_id}] object does not contain all {reco_contents}') \ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..e40982f --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,40 @@ +import pytest +import re +from pathlib import Path +from brkraw.api.pvobj import PvStudy +from pprint import pprint + +# test functions +def get_version(raw): + ptrn = r'^[a-zA-Z]*[ -]?(?P<version>\d+\.\d+(?:\.\d+)?)' + for scan_id in raw.avail: + pvscan = raw.get_scan(scan_id) + if version := pvscan.acqp.get('ACQ_sw_version'): + if matched := re.match(ptrn, version): + return matched.groupdict()['version'] + +def check_contents(path: Path): + if path.is_dir(): + if any([e.is_dir() and e.name.isdigit() for e in path.iterdir()]): + return PvStudy(path) + for e in path.iterdir(): + return check_contents(e) + elif path.is_file(): + if path.name.endswith('.zip'): + return PvStudy(path) + +@pytest.fixture +def dataset(): + return get_dataset() + +def get_dataset(): + dataset_path = Path('/mnt/nfs/active/Xoani_Lee_Package-dev/playground/brkraw_dev') + + dataset = {} + for contents in dataset_path.iterdir(): + if raw := check_contents(contents): + if version := get_version(raw): + if version not in dataset.keys(): + dataset[version] = {} + dataset[version][raw.path.name] = raw + return dataset \ No newline at end of file From 9a9d4b3d5635fc1fa58e822c7155ab3bd1103642 Mon Sep 17 00:00:00 2001 From: SungHo Lee Date: Sun, 5 May 2024 22:34:46 -0400 Subject: [PATCH 10/16] fix(config) update dependency-related changes --- .gitignore | 4 ++- brkraw/__init__.py | 4 +-- brkraw/api/data/study.py | 8 ++--- brkraw/app/tonifti/base.py | 7 ++-- pyproject.toml | 2 +- tests/01_api_pvobj_test.py | 36 ++++++++++---------- tests/conftest.py | 70 +++++++++++++++++++------------------- 7 files changed, 66 insertions(+), 65 deletions(-) diff --git a/.gitignore b/.gitignore index 2c9f9c3..daf002e 100644 --- a/.gitignore +++ b/.gitignore @@ -17,4 +17,6 @@ build tests/.brkraw tests/_*.ipynb tests/tutorials -tests/_datasets \ No newline at end of file +tests/_datasets + +.python-version \ No newline at end of file diff --git a/brkraw/__init__.py b/brkraw/__init__.py index d2fcca9..8ef9754 100644 --- a/brkraw/__init__.py +++ b/brkraw/__init__.py @@ -1,8 +1,8 @@ from .lib import * -from xnippy import Xnippy as ConfigManager +from xnippy import XnippyManager __version__ = '0.4.0' -config = ConfigManager(package_name=__package__, +config = XnippyManager(package_name=__package__, package_version=__version__, package__file__=__file__, config_filename='config.yaml') diff --git a/brkraw/api/data/study.py b/brkraw/api/data/study.py index 1bed535..de2712c 100644 --- a/brkraw/api/data/study.py +++ b/brkraw/api/data/study.py @@ -34,7 +34,7 @@ from brkraw import config from brkraw.api.pvobj import PvStudy from brkraw.api.analyzer.base import BaseAnalyzer -from xnippy.formatter import RecipeFormatter +from xnippy.parser import RecipeParser from typing
import TYPE_CHECKING if TYPE_CHECKING: from typing import Optional @@ -158,7 +158,7 @@ def _process_header(self): spec_path = os.path.join(os.path.dirname(__file__), 'study.yaml') # TODO with open(spec_path, 'r') as f: spec = yaml.safe_load(f) - self._info = StudyHeader(header=RecipeFormatter(self, copy(spec)['study']).get(), + self._info = StudyHeader(header=RecipeParser(self, copy(spec)['study']).get(), scans=[]) with warnings.catch_warnings(): warnings.simplefilter("ignore") @@ -168,13 +168,13 @@ def _process_header(self): scaninfo_targets = [scanobj.info, scanobj.get_scaninfo(get_analyzer=True)] scan_header = ScanHeader(scan_id=scan_id, - header=RecipeFormatter(scaninfo_targets, scan_spec).get(), + header=RecipeParser(scaninfo_targets, scan_spec).get(), recos=[]) for reco_id in scanobj.avail: recoinfo_targets = [scanobj.get_scaninfo(reco_id=reco_id), scanobj.get_scaninfo(reco_id=reco_id, get_analyzer=True)] reco_spec = copy(spec)['reco'] - reco_header = RecipeParser(recoinfo_targets, reco_spec).get() wait + reco_header = RecipeParser(recoinfo_targets, reco_spec).get() reco_header = RecoHeader(reco_id=reco_id, header=reco_header) if reco_header else None if reco_header: diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index 4e28efe..cb970ac 100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -124,20 +124,19 @@ def _bypass_method_via_plugin(scanobj: 'Scan', plugin = BaseMethods._get_plugin_snippets_by_name(plugin) if isinstance(plugin, PlugInSnippet) and plugin.type == 'tonifti': print(f'++ Installed PlugIn: {plugin}') - with plugin.set(pvobj=scanobj.pvobj, **plugin_kws) as p: + with plugin.run(pvobj=scanobj.pvobj, **plugin_kws) as p: nifti1image = p.get_nifti1image(subj_type=subj_type, subj_position=subj_position) return nifti1image else: - fetcher = config.get_fetcher('plugin') warnings.warn("Failed. Given plugin not available, " "please install local plugin or use from available on " - f"remote repository. 
-> {[p.name for p in config.avail]}", UserWarning) return None @staticmethod def _get_plugin_snippets_by_name(plugin: str): - fetcher = config.get_fetcher('plugin') + fetcher = config._fetcher if not fetcher.is_cache: plugin = BaseMethods._filter_snippets_by_name(plugin, fetcher.local) if fetcher.is_cache or not isinstance(plugin, PlugInSnippet): diff --git a/pyproject.toml b/pyproject.toml index 61786a0..c1df13c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,7 @@ dependencies = [ 'numpy>=1.18.0', 'tqdm>=4.45.0', 'packaging>=23.1', - 'xnippy==0.0.1' + 'xnippy>=0.1.1' ] description = "Bruker PvDataset Loader" license = {text = "GNLv3"} diff --git a/tests/01_api_pvobj_test.py b/tests/01_api_pvobj_test.py index 161511e..3e2a835 100644 --- a/tests/01_api_pvobj_test.py +++ b/tests/01_api_pvobj_test.py @@ -1,19 +1,19 @@ -def test_loading(dataset): - scan_contents = ['method', 'acqp'] - reco_contents = ['2dseq', 'visu_pars', 'reco'] +# def test_loading(dataset): +# scan_contents = ['method', 'acqp'] +# reco_contents = ['2dseq', 'visu_pars', 'reco'] - for v, subset in dataset.items(): - print(f'- v{v}:') - for fname, rawobj in subset.items(): - print(f' + testing {fname}') - for scan_id in rawobj.avail: - scanobj = rawobj.get_scan(scan_id) - failed = sum([int(f in scan_contents) for f in scanobj._contents['files']]) < len(scan_contents) - if failed: - print(f' - [{scan_id}] object does not contain all {scan_contents}') - else: - for reco_id in scanobj.avail: - recoobj = scanobj.get_reco(reco_id) - failed = sum([int(f in reco_contents) for f in recoobj.contents['files']]) < len(reco_contents) - if failed: - print(f' - [{scan_id}][{reco_id}] object does not contain all {reco_contents}') \ No newline at end of file +# for v, subset in dataset.items(): +# print(f'- v{v}:') +# for fname, rawobj in subset.items(): +# print(f' + testing {fname}') +# for scan_id in rawobj.avail: +# scanobj = rawobj.get_scan(scan_id) +# failed = sum([int(f in scan_contents) for f in scanobj._contents['files']]) < len(scan_contents) +# if failed: +# print(f' - [{scan_id}] object does not contain all {scan_contents}') +# else: +# for reco_id in scanobj.avail: +# recoobj = scanobj.get_reco(reco_id) +# failed = sum([int(f in reco_contents) for f in recoobj.contents['files']]) < len(reco_contents) +# if failed: +# print(f' - [{scan_id}][{reco_id}] object does not contain all {reco_contents}') diff --git a/tests/conftest.py b/tests/conftest.py index e40982f..0d7be6e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,40 +1,40 @@ -import pytest -import re -from pathlib import Path -from brkraw.api.pvobj import PvStudy -from pprint import pprint +# import pytest +# import re +# from pathlib import Path +# from brkraw.api.pvobj import PvStudy +# from pprint import pprint -# test functions -def get_version(raw): - ptrn = r'^[a-zA-Z]*[ -]?(?P<version>\d+\.\d+(?:\.\d+)?)' - for scan_id in raw.avail: - pvscan = raw.get_scan(scan_id) - if version := pvscan.acqp.get('ACQ_sw_version'): - if matched := re.match(ptrn, version): - return matched.groupdict()['version'] +# # test functions +# def get_version(raw): +# ptrn = r'^[a-zA-Z]*[ -]?(?P<version>\d+\.\d+(?:\.\d+)?)' +# for scan_id in raw.avail: +# pvscan = raw.get_scan(scan_id) +# if version := pvscan.acqp.get('ACQ_sw_version'): +# if matched := re.match(ptrn, version): +# return matched.groupdict()['version'] -def check_contents(path: Path): - if path.is_dir(): - if any([e.is_dir() and e.name.isdigit() for e in path.iterdir()]): - return PvStudy(path) - for e in path.iterdir(): -
return check_contents(e) - elif path.is_file(): - if path.name.endswith('.zip'): - return PvStudy(path) +# def check_contents(path: Path): +# if path.is_dir(): +# if any([e.is_dir() and e.name.isdigit() for e in path.iterdir()]): +# return PvStudy(path) +# for e in path.iterdir(): +# return check_contents(e) +# elif path.is_file(): +# if path.name.endswith('.zip'): +# return PvStudy(path) -@pytest.fixture -def dataset(): - return get_dataset() +# @pytest.fixture +# def dataset(): +# return get_dataset() -def get_dataset(): - dataset_path = Path('/mnt/nfs/active/Xoani_Lee_Package-dev/playground/brkraw_dev') +# def get_dataset(): +# dataset_path = Path('/mnt/nfs/active/Xoani_Lee_Package-dev/playground/brkraw_dev') - dataset = {} - for contents in dataset_path.iterdir(): - if raw := check_contents(contents): - if version := get_version(raw): - if version not in dataset.keys(): - dataset[version] = {} - dataset[version][raw.path.name] = raw - return dataset \ No newline at end of file +# dataset = {} +# for contents in dataset_path.iterdir(): +# if raw := check_contents(contents): +# if version := get_version(raw): +# if version not in dataset.keys(): +# dataset[version] = {} +# dataset[version][raw.path.name] = raw +# return dataset \ No newline at end of file From f1f7ee7d2ef9aacf2225a95242a3f1714435f0e5 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 5 May 2024 22:40:43 -0400 Subject: [PATCH 11/16] fix(dependency) rollback dependency update - nibabel --- pyproject.toml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c1df13c..5f3339c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,8 @@ dependencies = [ 'numpy>=1.18.0', 'tqdm>=4.45.0', 'packaging>=23.1', - 'xnippy>=0.1.1' + 'xnippy>=0.1.1', + 'nibabel>=3.0.2' ] description = "Bruker PvDataset Loader" license = {text = "GNLv3"} @@ -45,10 +46,6 @@ legacy = [ 'SimpleITK>=1.2.4' ] -tonifti = [ - 'nibabel>=3.0.2' - ] - viewer = [ 'pillow>=7.1.1' ] From 1ffb67dfbbfd09310c9595d6536ec81c5f73508a Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 5 May 2024 22:42:03 -0400 Subject: [PATCH 12/16] dependency --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 5f3339c..0220106 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,8 @@ dependencies = [ 'tqdm>=4.45.0', 'packaging>=23.1', 'xnippy>=0.1.1', - 'nibabel>=3.0.2' + 'nibabel>=3.0.2', + 'requests>=2.31.0' ] description = "Bruker PvDataset Loader" license = {text = "GNLv3"} From 9d0dedc7f97e2f9d75a45e8b19231f6fec758df9 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 5 May 2024 22:44:00 -0400 Subject: [PATCH 13/16] dependency --- pyproject.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0220106..bfe7563 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,10 @@ dependencies = [ 'packaging>=23.1', 'xnippy>=0.1.1', 'nibabel>=3.0.2', - 'requests>=2.31.0' + 'requests>=2.31.0', + 'pandas>=1.0.0', + 'openpyxl>=3.0.3', + 'xlrd>=1.0.0', ] description = "Bruker PvDataset Loader" license = {text = "GNLv3"} @@ -41,9 +44,6 @@ Homepage = "https://brkraw.github.io" [project.optional-dependencies] legacy = [ - 'pandas>=1.0.0', - 'openpyxl>=3.0.3', - 'xlrd>=1.0.0', 'SimpleITK>=1.2.4' ] From 5e4c050f3eb3c3835ec908fd295cc35cc538efd6 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 5 May 2024 23:24:17 -0400 Subject: [PATCH 14/16] fix(config) xnippy compatibility --- 
brkraw/app/tonifti/base.py | 6 ++-- brkraw/config.yaml | 13 +++---- pyproject.toml | 2 +- tests/conftest.py | 70 +++++++++++++++++++------------------- 4 files changed, 43 insertions(+), 48 deletions(-) diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index cb970ac..c265c63 100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -12,11 +12,11 @@ from typing import Optional, Union, Literal from typing import List from numpy.typing import NDArray - from xnippy.types import ConfigManagerType + from xnippy.types import XnipyManagerType class BaseMethods(BaseBufferHandler): - config: ConfigManagerType = config + config: XnipyManagerType = config def set_scale_mode(self, scale_mode: Optional[Literal['header', 'apply']] = None): @@ -122,7 +122,7 @@ def _bypass_method_via_plugin(scanobj: 'Scan', plugin_kws: Optional[dict] = None) -> Optional[Nifti1Image]: if isinstance(plugin, str): plugin = BaseMethods._get_plugin_snippets_by_name(plugin) - if isinstance(plugin, PlugInSnippet) and plugin.type == 'tonifti': + if isinstance(plugin, PlugInSnippet) and 'brkraw' in plugin._manifest['package']: # TODO: need to have better tool to check version compatibility as well. print(f'++ Installed PlugIn: {plugin}') with plugin.run(pvobj=scanobj.pvobj, **plugin_kws) as p: nifti1image = p.get_nifti1image(subj_type=subj_type, subj_position=subj_position) diff --git a/brkraw/config.yaml b/brkraw/config.yaml index 265e5bd..208255e 100644 --- a/brkraw/config.yaml +++ b/brkraw/config.yaml @@ -1,15 +1,10 @@ -plugin: +xnippy: repo: - name: brkraw-snippets url: https://github.com/brkraw/brkraw-snippets.git - path: plugin - preset: - path: preset - spec: - path: spec - recipe: - path: recipe - + plugin: + path: plugin + app: tonifti: output_filename: diff --git a/pyproject.toml b/pyproject.toml index bfe7563..e1c0d02 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,7 @@ dependencies = [ 'numpy>=1.18.0', 'tqdm>=4.45.0', 'packaging>=23.1', - 'xnippy>=0.1.1', + 'xnippy>=0.1.3', 'nibabel>=3.0.2', 'requests>=2.31.0', 'pandas>=1.0.0', diff --git a/tests/conftest.py b/tests/conftest.py index 0d7be6e..e40982f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,40 +1,40 @@ -# import pytest -# import re -# from pathlib import Path -# from brkraw.api.pvobj import PvStudy -# from pprint import pprint +import pytest +import re +from pathlib import Path +from brkraw.api.pvobj import PvStudy +from pprint import pprint -# # test functions -# def get_version(raw): -# ptrn = r'^[a-zA-Z]*[ -]?(?P<version>\d+\.\d+(?:\.\d+)?)' -# for scan_id in raw.avail: -# pvscan = raw.get_scan(scan_id) -# if version := pvscan.acqp.get('ACQ_sw_version'): -# if matched := re.match(ptrn, version): -# return matched.groupdict()['version'] +# test functions +def get_version(raw): + ptrn = r'^[a-zA-Z]*[ -]?(?P<version>\d+\.\d+(?:\.\d+)?)' + for scan_id in raw.avail: + pvscan = raw.get_scan(scan_id) + if version := pvscan.acqp.get('ACQ_sw_version'): + if matched := re.match(ptrn, version): + return matched.groupdict()['version'] -# def check_contents(path: Path): -# if path.is_dir(): -# if any([e.is_dir() and e.name.isdigit() for e in path.iterdir()]): -# return PvStudy(path) -# for e in path.iterdir(): -# return check_contents(e) -# elif
path.is_file(): + if path.name.endswith('.zip'): + return PvStudy(path) -# @pytest.fixture -# def dataset(): -# return get_dataset() +@pytest.fixture +def dataset(): + return get_dataset() -# def get_dataset(): -# dataset_path = Path('/mnt/nfs/active/Xoani_Lee_Package-dev/playground/brkraw_dev') +def get_dataset(): + dataset_path = Path('/mnt/nfs/active/Xoani_Lee_Package-dev/playground/brkraw_dev') -# dataset = {} -# for contents in dataset_path.iterdir(): -# if raw := check_contents(contents): -# if version := get_version(raw): -# if version not in dataset.keys(): -# dataset[version] = {} -# dataset[version][raw.path.name] = raw -# return dataset \ No newline at end of file + dataset = {} + for contents in dataset_path.iterdir(): + if raw := check_contents(contents): + if version := get_version(raw): + if version not in dataset.keys(): + dataset[version] = {} + dataset[version][raw.path.name] = raw + return dataset \ No newline at end of file From a0e4da9583e8016ed12c347c692f1b575e157155 Mon Sep 17 00:00:00 2001 From: SungHo Lee Date: Mon, 6 May 2024 00:29:28 -0400 Subject: [PATCH 15/16] dependent module rename --- brkraw/__init__.py | 4 ++-- brkraw/api/__init__.py | 5 ++--- brkraw/api/data/study.py | 2 +- brkraw/api/pvobj/base.py | 2 +- brkraw/app/tonifti/__init__.py | 2 +- brkraw/app/tonifti/base.py | 6 +++--- brkraw/config.yaml | 2 +- 7 files changed, 11 insertions(+), 12 deletions(-) diff --git a/brkraw/__init__.py b/brkraw/__init__.py index 8ef9754..fbf5897 100644 --- a/brkraw/__init__.py +++ b/brkraw/__init__.py @@ -1,8 +1,8 @@ from .lib import * -from xnippy import XnippyManager +from xnippet import XnippetManager __version__ = '0.4.0' -config = XnippyManager(package_name=__package__, +config = XnippetManager(package_name=__package__, package_version=__version__, package__file__=__file__, config_filename='config.yaml') diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py index a6bbfb6..50c4e4a 100755 --- a/brkraw/api/__init__.py +++ b/brkraw/api/__init__.py @@ -1,6 +1,5 @@ -# from .data import Study -from xnippy.snippet.plugin import PlugIn as PlugInSnippet -from xnippy.formatter import PathFormatter +from xnippet.snippet.plugin import PlugIn as PlugInSnippet +from xnippet.formatter import PathFormatter __all__ = ['Study', 'PlugInSnippet', 'PathFormatter'] \ No newline at end of file diff --git a/brkraw/api/data/study.py b/brkraw/api/data/study.py index de2712c..d1a29bc 100644 --- a/brkraw/api/data/study.py +++ b/brkraw/api/data/study.py @@ -34,7 +34,7 @@ from brkraw import config from brkraw.api.pvobj import PvStudy from brkraw.api.analyzer.base import BaseAnalyzer -from xnippy.parser import RecipeParser +from xnippet.parser import RecipeParser from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Optional diff --git a/brkraw/api/pvobj/base.py b/brkraw/api/pvobj/base.py index bccfdd6..e26aa86 100644 --- a/brkraw/api/pvobj/base.py +++ b/brkraw/api/pvobj/base.py @@ -16,7 +16,7 @@ from collections import OrderedDict, defaultdict from pathlib import Path from .parameters import Parameter -from xnippy.formatter import PathFormatter +from xnippet.formatter import PathFormatter from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Optional, List diff --git a/brkraw/app/tonifti/__init__.py b/brkraw/app/tonifti/__init__.py index 942992f..88bbc6b 100644 --- a/brkraw/app/tonifti/__init__.py +++ b/brkraw/app/tonifti/__init__.py @@ -3,7 +3,7 @@ bids, plugin """ from brkraw import __version__, config -from xnippy.module import ModuleCommander +from 
xnippet.module import ModuleCommander from brkraw.app.tonifti.plugin import ToNiftiPlugin, PvScan, PvReco, PvFiles from brkraw.app.tonifti.study import StudyToNifti, ScanToNifti diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index c265c63..8d72443 100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -6,17 +6,17 @@ from .header import Header from brkraw.api.pvobj.base import BaseBufferHandler from brkraw.api.data import Scan -from xnippy.snippet import PlugInSnippet +from xnippet.snippet import PlugInSnippet from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Optional, Union, Literal from typing import List from numpy.typing import NDArray - from xnippy.types import XnipyManagerType + from xnippet.types import XnippetManagerType class BaseMethods(BaseBufferHandler): - config: XnipyManagerType = config + config: XnippetManagerType = config def set_scale_mode(self, scale_mode: Optional[Literal['header', 'apply']] = None): diff --git a/brkraw/config.yaml b/brkraw/config.yaml index 208255e..29b69fe 100644 --- a/brkraw/config.yaml +++ b/brkraw/config.yaml @@ -1,4 +1,4 @@ -xnippy: +xnippet: repo: - name: brkraw-snippets url: https://github.com/brkraw/brkraw-snippets.git From 458c0e70a7e69924ea9c6fb38249f384c1586f8f Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Mon, 6 May 2024 00:40:27 -0400 Subject: [PATCH 16/16] dependencies, cleanup --- brkraw/api/__init__.py | 2 +- pyproject.toml | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py index 50c4e4a..5d4d0c7 100755 --- a/brkraw/api/__init__.py +++ b/brkraw/api/__init__.py @@ -2,4 +2,4 @@ from xnippet.formatter import PathFormatter -__all__ = ['Study', 'PlugInSnippet', 'PathFormatter'] \ No newline at end of file +__all__ = ['PlugInSnippet', 'PathFormatter'] \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index e1c0d02..4b63c30 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,10 +13,8 @@ dependencies = [ 'pyyaml>=6.0.1', 'numpy>=1.18.0', 'tqdm>=4.45.0', - 'packaging>=23.1', - 'xnippy>=0.1.3', + 'xnippet>=0.1.0', 'nibabel>=3.0.2', - 'requests>=2.31.0', 'pandas>=1.0.0', 'openpyxl>=3.0.3', 'xlrd>=1.0.0',