diff --git a/.github/workflows/pypi-publish.yml b/.github/workflows/publish-pypi.yml
similarity index 88%
rename from .github/workflows/pypi-publish.yml
rename to .github/workflows/publish-pypi.yml
index 1ed6efa..c7d37df 100644
--- a/.github/workflows/pypi-publish.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -12,12 +12,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4

-      - name: Set up Python 3.9
-        uses: actions/setup-python@v2
+      - name: Set up Python 3.11
+        uses: actions/setup-python@v5
         with:
-          python-version: 3.9
+          python-version: 3.11

       # build SQLite from source, because I need >=3.35
       - name: Download SQLite3
@@ -61,7 +61,7 @@ jobs:
       - run: touch ./docs/_build/html/.nojekyll

       - name: GH Pages Deployment
-        uses: JamesIves/github-pages-deploy-action@4.1.3
+        uses: JamesIves/github-pages-deploy-action@v4
         with:
           branch: gh-pages # The branch the action should deploy to.
           folder: ./docs/_build/html
@@ -74,7 +74,7 @@ jobs:
           LD_LIBRARY_PATH: /usr/local/lib

       - name: Publish package
-        uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
+        uses: pypa/gh-action-pypi-publish@v1.12.2
         with:
           user: __token__
           password: ${{ secrets.PYPI_PASSWORD }}
diff --git a/.github/workflows/pypi-test.yml b/.github/workflows/run-tests.yml
similarity index 91%
rename from .github/workflows/pypi-test.yml
rename to .github/workflows/run-tests.yml
index 624ea5b..3a814ff 100644
--- a/.github/workflows/pypi-test.yml
+++ b/.github/workflows/run-tests.yml
@@ -11,13 +11,13 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+        python-version: [ "3.9", "3.10", "3.11", "3.12", "3.13" ]
     name: Python ${{ matrix.python-version }}
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Setup Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
           cache: "pip"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 3c9601c..e60a5f4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,19 +17,19 @@ repos:
   - id: mixed-line-ending
     args: ['--fix=auto']  # replace 'auto' with 'lf' to enforce Linux/Mac line endings or 'crlf' for Windows

-- repo: https://github.com/PyCQA/docformatter
-  rev: v1.7.5
-  hooks:
-  - id: docformatter
-    additional_dependencies: [tomli]
-    args: [--in-place, --wrap-descriptions=120, --wrap-summaries=120]
-    # --config, ./pyproject.toml
+# - repo: https://github.com/PyCQA/docformatter
+#   rev: master
+#   hooks:
+#   - id: docformatter
+#     additional_dependencies: [tomli]
+#     args: [--in-place, --wrap-descriptions=120, --wrap-summaries=120]
+#     # --config, ./pyproject.toml

-- repo: https://github.com/psf/black
-  rev: 24.8.0
-  hooks:
-  - id: black
-    language_version: python3
+# - repo: https://github.com/psf/black
+#   rev: 24.8.0
+#   hooks:
+#   - id: black
+#     language_version: python3

 - repo: https://github.com/astral-sh/ruff-pre-commit
   # Ruff version.
@@ -37,6 +37,7 @@ repos:
   hooks:
   - id: ruff
     args: [--fix, --exit-non-zero-on-fix]
+  - id: ruff-format

 ## If you'd like to embrace black styles even in the docs:
 # - repo: https://github.com/asottile/blacken-docs
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c6b4075..cc01007 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,10 @@
 # Changelog

+## Version 0.3.0
+
+- chore: Remove Python 3.8 (EOL).
+- precommit: Replace docformatter with ruff's formatter.
+
 ## Version 0.2.0

 - Changes to support NumPy's 2.0 release.
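The pre-commit change above retires `black` and `docformatter` in favor of ruff's built-in formatter (the new `ruff-format` hook), and the `[tool.ruff.format]` table added to `pyproject.toml` further down enables docstring code formatting with a 20-column line length. That one setting explains most of the docstring churn in the `src/scrnaseq/*.py` hunks below. A minimal sketch of the effect, using a hypothetical function rather than code from this repo:

```python
# Hypothetical illustration: how `ruff format` rewrites an embedded
# docstring example once docstring-code-format = true and
# docstring-code-line-length = 20 are set in pyproject.toml.


def before():
    """Fetch a dataset.

    .. code-block:: python

        sce = fetch_dataset("zeisel-brain-2015", "2023-12-14")
    """


def after():
    """Fetch a dataset.

    .. code-block:: python

        sce = fetch_dataset(
            "zeisel-brain-2015",
            "2023-12-14",
        )
    """
```

The 20-column limit is why even short calls like `list_datasets()` end up wrapped across multiple lines in the source hunks below.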
diff --git a/README.md b/README.md
index f6b692c..f565faf 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,6 @@
 [![Twitter](https://img.shields.io/twitter/url/http/shields.io.svg?style=social&label=Twitter)](https://twitter.com/scrnaseq)
 -->

-[![Project generated with PyScaffold](https://img.shields.io/badge/-PyScaffold-005CA0?logo=pyscaffold)](https://pyscaffold.org/)
 [![PyPI-Server](https://img.shields.io/pypi/v/scrnaseq.svg)](https://pypi.org/project/scrnaseq/)

 # scrnaseq
diff --git a/docs/conf.py b/docs/conf.py
index 16b0213..4ee6669 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -72,6 +72,7 @@
     "sphinx.ext.ifconfig",
     "sphinx.ext.mathjax",
     "sphinx.ext.napoleon",
+    "sphinx_autodoc_typehints",
 ]

 # Add any paths that contain templates here, relative to this directory.
diff --git a/pyproject.toml b/pyproject.toml
index 0514df9..45716dd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,6 +17,10 @@ extend-ignore = ["F821"]
 [tool.ruff.pydocstyle]
 convention = "google"

+[tool.ruff.format]
+docstring-code-format = true
+docstring-code-line-length = 20
+
 [tool.ruff.per-file-ignores]
 "__init__.py" = ["E402", "F401"]

diff --git a/setup.cfg b/setup.cfg
index 0947cc9..c7c89aa 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -41,7 +41,7 @@ package_dir =
     =src

# Require a min/specific Python version (comma-separated conditions)
-python_requires = >=3.8
+python_requires = >=3.9

# Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0.
# Version specifiers like >=2.2,<3.0 avoid problems due to API changes in
@@ -49,9 +49,9 @@ python_requires = >=3.9
# For more information, check out https://semver.org/.
install_requires =
     importlib-metadata; python_version<"3.8"
-    dolomite_base
+    dolomite_base>=0.4.2
     dolomite_matrix
-    dolomite_sce>=0.1.2
+    dolomite_sce
     gypsum_client>=0.1.3
     delayedarray>=0.5.1
     summarizedexperiment
diff --git a/src/scrnaseq/fetch_dataset.py b/src/scrnaseq/fetch_dataset.py
index 723f61f..f59868b 100644
--- a/src/scrnaseq/fetch_dataset.py
+++ b/src/scrnaseq/fetch_dataset.py
@@ -41,7 +41,10 @@ def fetch_dataset(

     .. code-block:: python

-        sce = fetch_dataset("zeisel-brain-2015", "2023-12-14")
+        sce = fetch_dataset(
+            "zeisel-brain-2015",
+            "2023-12-14",
+        )

     Args:
         name:
@@ -83,12 +86,8 @@ def fetch_dataset(
         or one of its subclasses.
     """

-    version_path = save_version(
-        package, name, version, cache_dir=cache_dir, overwrite=overwrite
-    )
-    obj_path = (
-        version_path if path is None else os.path.join(version_path, path.rstrip("/"))
-    )
+    version_path = save_version(package, name, version, cache_dir=cache_dir, overwrite=overwrite)
+    obj_path = version_path if path is None else os.path.join(version_path, path.rstrip("/"))

     old = alt_read_object_function(single_cell_load_object)

@@ -122,7 +121,10 @@ def fetch_metadata(

     .. code-block:: python

-        meta = fetch_metadata("zeisel-brain-2015", "2023-12-14")
+        meta = fetch_metadata(
+            "zeisel-brain-2015",
+            "2023-12-14",
+        )

     Args:
         name:
@@ -150,9 +152,7 @@ def fetch_metadata(
         Dictionary containing metadata for the specified dataset.
     """

     remote_path = "_bioconductor.json" if path is None else f"{path}/_bioconductor.json"
-    local_path = save_file(
-        package, name, version, remote_path, cache_dir=cache_dir, overwrite=overwrite
-    )
+    local_path = save_file(package, name, version, remote_path, cache_dir=cache_dir, overwrite=overwrite)

     with open(local_path, "r") as f:
         metadata = json.load(f)
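Taken together, `fetch_dataset` materializes the saved object (via `save_version` and the `alt_read_object_function` override) while `fetch_metadata` retrieves the matching `_bioconductor.json` payload. A usage sketch based on the docstring examples above, assuming both functions are re-exported at the package root the way `scrnaseq.fetch_dataset` is used in the `save_dataset` docstring later in this diff:

```python
import scrnaseq

# Fetch the experiment and its registered metadata for the same
# dataset/version pair used in the docstring examples.
sce = scrnaseq.fetch_dataset("zeisel-brain-2015", "2023-12-14")
meta = scrnaseq.fetch_metadata("zeisel-brain-2015", "2023-12-14")

print(type(sce).__name__)  # SingleCellExperiment or a subclass
print(meta["title"])       # fields come from the _bioconductor.json payload
```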
""" remote_path = "_bioconductor.json" if path is None else f"{path}/_bioconductor.json" - local_path = save_file( - package, name, version, remote_path, cache_dir=cache_dir, overwrite=overwrite - ) + local_path = save_file(package, name, version, remote_path, cache_dir=cache_dir, overwrite=overwrite) with open(local_path, "r") as f: metadata = json.load(f) diff --git a/src/scrnaseq/list_datasets.py b/src/scrnaseq/list_datasets.py index f79cc84..9163266 100644 --- a/src/scrnaseq/list_datasets.py +++ b/src/scrnaseq/list_datasets.py @@ -14,16 +14,16 @@ @lru_cache -def list_datasets( - cache_dir: str = cache_directory(), overwrite: bool = False, latest: bool = True -) -> pd.DataFrame: +def list_datasets(cache_dir: str = cache_directory(), overwrite: bool = False, latest: bool = True) -> pd.DataFrame: """List all available datasets. Example: .. code-block:: python - datasets = list_datasets() + datasets = ( + list_datasets() + ) Args: cache_dir: @@ -83,9 +83,7 @@ def _format_query_results(results: list, key_names: list): def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "meta"): - _all_paths = [ - None if "/" not in p else p.rsplit("/", 1)[0] for p in results["path"] - ] + _all_paths = [None if "/" not in p else p.rsplit("/", 1)[0] for p in results["path"]] df = pd.DataFrame( { @@ -105,33 +103,22 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met ) df["title"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("title")) df["description"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("title")) - df["taxonomy_id"] = _extract_charlist_from_json( - _all_metas, lambda x: x.get("taxonomy_id") - ) + df["taxonomy_id"] = _extract_charlist_from_json(_all_metas, lambda x: x.get("taxonomy_id")) df["genome"] = _extract_charlist_from_json(_all_metas, lambda x: x.get("genome")) df["rows"] = _extract_atomic_from_json( _all_metas, - lambda x: x.get("applications", {}) - .get("takane", {}) - .get("summarized_experiment", {}) - .get("rows"), + lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("rows"), ) df["columns"] = _extract_atomic_from_json( _all_metas, - lambda x: x.get("applications", {}) - .get("takane", {}) - .get("summarized_experiment", {}) - .get("columns"), + lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("columns"), ) df["assays"] = _extract_charlist_from_json( _all_metas, - lambda x: x.get("applications", {}) - .get("takane", {}) - .get("summarized_experiment", {}) - .get("assays"), + lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("assays"), ) df["column_annotations"] = _extract_charlist_from_json( _all_metas, @@ -155,15 +142,9 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met .get("alternative_experiments"), ) - df["bioconductor_version"] = _extract_atomic_from_json( - _all_metas, lambda x: x.get("bioconductor_version") - ) - df["maintainer_name"] = _extract_atomic_from_json( - _all_metas, lambda x: x.get("maintainer_name") - ) - df["maintainer_email"] = _extract_atomic_from_json( - _all_metas, lambda x: x.get("maintainer_email") - ) + df["bioconductor_version"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("bioconductor_version")) + df["maintainer_name"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("maintainer_name")) + df["maintainer_email"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("maintainer_email")) sources = [] for meta in 
diff --git a/src/scrnaseq/list_versions.py b/src/scrnaseq/list_versions.py
index ae1023b..0343aa9 100644
--- a/src/scrnaseq/list_versions.py
+++ b/src/scrnaseq/list_versions.py
@@ -14,7 +14,9 @@ def list_versions(name: str) -> List[str]:

     .. code-block:: python

-        versions = list_versions("romanov-brain-2017")
+        versions = list_versions(
+            "romanov-brain-2017"
+        )

     Args:
         name:
@@ -33,7 +35,9 @@ def fetch_latest_version(name: str) -> str:

     .. code-block:: python

-        version = fetch_latest_version("romanov-brain-2017")
+        version = fetch_latest_version(
+            "romanov-brain-2017"
+        )

     Args:
         name:
diff --git a/src/scrnaseq/save_dataset.py b/src/scrnaseq/save_dataset.py
index 8d9a10c..246f7ad 100644
--- a/src/scrnaseq/save_dataset.py
+++ b/src/scrnaseq/save_dataset.py
@@ -54,15 +54,27 @@ def save_dataset(x: Any, path, metadata):
         # Fetch an existing dataset
         # or create your own ``SingleCellExperiment``
         # or ``AnnData`` object.
-        sce = scrnaseq.fetch_dataset("zeisel-brain-2015", "2023-12-14")
+        sce = scrnaseq.fetch_dataset(
+            "zeisel-brain-2015",
+            "2023-12-14",
+        )

         # Provide dataset level metadata for search and findability
         meta = {
             "title": "My dataset made from zeisel brain",
             "description": "This is a copy of the zeisel",
-            "taxonomy_id": ["10090"],  # NCBI ID
-            "genome": ["GRCh38"],  # genome build
-            "sources": [{"provider": "GEO", "id": "GSE12345"}],
+            "taxonomy_id": [
+                "10090"
+            ],  # NCBI ID
+            "genome": [
+                "GRCh38"
+            ],  # genome build
+            "sources": [
+                {
+                    "provider": "GEO",
+                    "id": "GSE12345",
+                }
+            ],
             "maintainer_name": "Shizuka Mogami",
             "maintainer_email": "mogami.shizuka@765pro.com",
         }
@@ -73,14 +85,18 @@ def save_dataset(x: Any, path, metadata):
         cache_dir = tempfile.mkdtemp()

         # Make sure the directory is clean
-        shutil.rmtree(cache_dir)
+        shutil.rmtree(
+            cache_dir
+        )

         # Save the dataset
-        scrnaseq.save_dataset(sce, cache_dir, meta)
+        scrnaseq.save_dataset(
+            sce,
+            cache_dir,
+            meta,
+        )
     """
-    raise NotImplementedError(
-        f"'save_dataset' is not supported for objects of class: {type(x)}"
-    )
+    raise NotImplementedError(f"'save_dataset' is not supported for objects of class: {type(x)}")


 def _save_se(x, path, metadata):
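For completeness, a round-trip sketch stitched together from the docstrings in this diff: resolve the latest version of a dataset, fetch it, then save a local copy. Every value in `meta` below is a placeholder patterned on the `save_dataset` docstring, and `fetch_latest_version` is assumed to be importable from the package root like the other helpers:

```python
import shutil
import tempfile

import scrnaseq
from scrnaseq import fetch_latest_version

name = "romanov-brain-2017"
version = fetch_latest_version(name)  # resolves to the newest registered version
sce = scrnaseq.fetch_dataset(name, version)

# save_dataset's docstring stages into a path that does not yet exist,
# so create a temporary directory and immediately remove it.
staging = tempfile.mkdtemp()
shutil.rmtree(staging)

meta = {
    "title": f"Local copy of {name}",
    "description": "Placeholder description for illustration.",
    "taxonomy_id": ["10090"],  # placeholder NCBI taxonomy ID
    "genome": ["GRCm38"],  # placeholder genome build
    "sources": [{"provider": "GEO", "id": "GSE12345"}],  # placeholder accession
    "maintainer_name": "Jane Doe",
    "maintainer_email": "jane.doe@example.com",
}

scrnaseq.save_dataset(sce, staging, meta)
```

The `NotImplementedError` fallback above is the generic dispatch target; concrete handlers such as `_save_se` cover the supported object types.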