chore: remove Python 3.8 support (#19)

jkanche authored Dec 28, 2024
1 parent dcd74a5 commit b1cd477

Showing 12 changed files with 90 additions and 81 deletions.
@@ -12,12 +12,12 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4

-      - name: Set up Python 3.9
-        uses: actions/setup-python@v2
+      - name: Set up Python 3.11
+        uses: actions/setup-python@v5
         with:
-          python-version: 3.9
+          python-version: 3.11

       # build SQLite from source, because I need 3.35<=
       - name: Download SQLite3
@@ -61,7 +61,7 @@ jobs:
       - run: touch ./docs/_build/html/.nojekyll

       - name: GH Pages Deployment
-        uses: JamesIves/github-pages-deploy-action@4.1.3
+        uses: JamesIves/github-pages-deploy-action@v4
         with:
           branch: gh-pages # The branch the action should deploy to.
           folder: ./docs/_build/html
@@ -74,7 +74,7 @@ jobs:
           LD_LIBRARY_PATH: /usr/local/lib

       - name: Publish package
-        uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
+        uses: pypa/gh-action-pypi-publish@v1.12.2
         with:
           user: __token__
           password: ${{ secrets.PYPI_PASSWORD }}
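The "build SQLite from source" step exists because Python's stdlib sqlite3 module uses whatever SQLite library the interpreter was linked against, and this workflow's comment says it needs 3.35 or newer. A minimal runtime check (a sketch, not part of this commit) shows which version a given environment actually provides:

```python
import sqlite3

# sqlite3.sqlite_version reports the linked SQLite library's version,
# which is what the workflow above upgrades by compiling from source.
print(sqlite3.sqlite_version)  # e.g. "3.45.1"
assert sqlite3.sqlite_version_info >= (3, 35, 0), "need SQLite >= 3.35"
```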
@@ -11,13 +11,13 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+        python-version: [ "3.9", "3.10", "3.11", "3.12", "3.13" ]

     name: Python ${{ matrix.python-version }}
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Setup Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
           cache: "pip"
25 changes: 13 additions & 12 deletions .pre-commit-config.yaml
@@ -17,26 +17,27 @@ repos:
       - id: mixed-line-ending
         args: ['--fix=auto'] # replace 'auto' with 'lf' to enforce Linux/Mac line endings or 'crlf' for Windows

-  - repo: https://github.com/PyCQA/docformatter
-    rev: v1.7.5
-    hooks:
-      - id: docformatter
-        additional_dependencies: [tomli]
-        args: [--in-place, --wrap-descriptions=120, --wrap-summaries=120]
-        # --config, ./pyproject.toml
+  # - repo: https://github.com/PyCQA/docformatter
+  #   rev: master
+  #   hooks:
+  #     - id: docformatter
+  #       additional_dependencies: [tomli]
+  #       args: [--in-place, --wrap-descriptions=120, --wrap-summaries=120]
+  #       # --config, ./pyproject.toml

-  - repo: https://github.com/psf/black
-    rev: 24.8.0
-    hooks:
-      - id: black
-        language_version: python3
+  # - repo: https://github.com/psf/black
+  #   rev: 24.8.0
+  #   hooks:
+  #     - id: black
+  #       language_version: python3

   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
     rev: v0.6.8
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix]
+      - id: ruff-format

 ## If like to embrace black styles even in the docs:
 # - repo: https://github.com/asottile/blacken-docs
5 changes: 5 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,10 @@
 # Changelog

+## Version 0.3.0
+
+- chore: Remove Python 3.8 (EOL).
+- precommit: Replace docformatter with ruff's formatter.
+
 ## Version 0.2.0

 - Changes to support NumPy's 2.0 release.
1 change: 0 additions & 1 deletion README.md
@@ -10,7 +10,6 @@
 [![Twitter](https://img.shields.io/twitter/url/http/shields.io.svg?style=social&label=Twitter)](https://twitter.com/scrnaseq)
 -->

-[![Project generated with PyScaffold](https://img.shields.io/badge/-PyScaffold-005CA0?logo=pyscaffold)](https://pyscaffold.org/)
 [![PyPI-Server](https://img.shields.io/pypi/v/scrnaseq.svg)](https://pypi.org/project/scrnaseq/)

 # scrnaseq
1 change: 1 addition & 0 deletions docs/conf.py
@@ -72,6 +72,7 @@
     "sphinx.ext.ifconfig",
     "sphinx.ext.mathjax",
     "sphinx.ext.napoleon",
+    "sphinx_autodoc_typehints",
 ]

 # Add any paths that contain templates here, relative to this directory.
4 changes: 4 additions & 0 deletions pyproject.toml
@@ -17,6 +17,10 @@ extend-ignore = ["F821"]
 [tool.ruff.pydocstyle]
 convention = "google"

+[tool.ruff.format]
+docstring-code-format = true
+docstring-code-line-length = 20
+
 [tool.ruff.per-file-ignores]
 "__init__.py" = ["E402", "F401"]
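The new `[tool.ruff.format]` table enables ruff's docstring code formatting with a 20-column limit, which is why the `.. code-block:: python` examples in the source hunks below get rewrapped so aggressively. A sketch of the effect on a hypothetical function (not from this repo):

```python
def fetch(name: str, version: str) -> str:
    """Fetch a dataset by name and version.

    Example:

    .. code-block:: python

        # With docstring-code-format = true and
        # docstring-code-line-length = 20, ruff reflows
        #   ds = fetch("zeisel-brain-2015", "2023-12-14")
        # into:
        ds = fetch(
            "zeisel-brain-2015",
            "2023-12-14",
        )
    """
    return f"{name}@{version}"
```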
6 changes: 3 additions & 3 deletions setup.cfg
@@ -41,17 +41,17 @@ package_dir =
     =src

 # Require a min/specific Python version (comma-separated conditions)
-python_requires = >=3.8
+python_requires = >=3.9

 # Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0.
 # Version specifiers like >=2.2,<3.0 avoid problems due to API changes in
 # new major versions. This works if the required packages follow Semantic Versioning.
 # For more information, check out https://semver.org/.
 install_requires =
-    importlib-metadata; python_version<"3.8"
-    dolomite_base
+    dolomite_base>=0.4.2
     dolomite_matrix
-    dolomite_sce>=0.1.2
+    dolomite_sce
     gypsum_client>=0.1.3
     delayedarray>=0.5.1
     summarizedexperiment
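Dropping the `importlib-metadata; python_version<"3.8"` backport is safe because `importlib.metadata` has been in the standard library since Python 3.8, so every interpreter the package now supports (3.9+) provides it directly. A quick sketch:

```python
# Stdlib since Python 3.8; the importlib-metadata backport was only
# ever installed on interpreters older than that.
from importlib.metadata import PackageNotFoundError, version

try:
    print(version("scrnaseq"))  # e.g. "0.3.0" per the CHANGELOG above
except PackageNotFoundError:
    print("scrnaseq is not installed")
```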
22 changes: 11 additions & 11 deletions src/scrnaseq/fetch_dataset.py
@@ -41,7 +41,10 @@ def fetch_dataset(
     .. code-block:: python

-        sce = fetch_dataset("zeisel-brain-2015", "2023-12-14")
+        sce = fetch_dataset(
+            "zeisel-brain-2015",
+            "2023-12-14",
+        )

     Args:
         name:
@@ -83,12 +86,8 @@ def fetch_dataset(
         or one of its subclasses.
     """

-    version_path = save_version(
-        package, name, version, cache_dir=cache_dir, overwrite=overwrite
-    )
-    obj_path = (
-        version_path if path is None else os.path.join(version_path, path.rstrip("/"))
-    )
+    version_path = save_version(package, name, version, cache_dir=cache_dir, overwrite=overwrite)
+    obj_path = version_path if path is None else os.path.join(version_path, path.rstrip("/"))

     old = alt_read_object_function(single_cell_load_object)

@@ -122,7 +121,10 @@ def fetch_metadata(
     .. code-block:: python

-        meta = fetch_metadata("zeisel-brain-2015", "2023-12-14")
+        meta = fetch_metadata(
+            "zeisel-brain-2015",
+            "2023-12-14",
+        )

     Args:
         name:
@@ -150,9 +152,7 @@ def fetch_metadata(
         Dictionary containing metadata for the specified dataset.
     """
     remote_path = "_bioconductor.json" if path is None else f"{path}/_bioconductor.json"
-    local_path = save_file(
-        package, name, version, remote_path, cache_dir=cache_dir, overwrite=overwrite
-    )
+    local_path = save_file(package, name, version, remote_path, cache_dir=cache_dir, overwrite=overwrite)

     with open(local_path, "r") as f:
         metadata = json.load(f)
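Put together, the reformatted docstring examples correspond to usage like this sketch (dataset name and version taken from the examples above; per the docstring, `fetch_dataset` returns a `SingleCellExperiment` or one of its subclasses):

```python
import scrnaseq

# Download (and cache) the dataset, then its registered metadata.
sce = scrnaseq.fetch_dataset("zeisel-brain-2015", "2023-12-14")
meta = scrnaseq.fetch_metadata("zeisel-brain-2015", "2023-12-14")

print(type(sce).__name__)  # SingleCellExperiment (or a subclass)
print(meta["title"])       # "title" is among the metadata fields extracted below
```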
47 changes: 13 additions & 34 deletions src/scrnaseq/list_datasets.py
@@ -14,16 +14,16 @@


 @lru_cache
-def list_datasets(
-    cache_dir: str = cache_directory(), overwrite: bool = False, latest: bool = True
-) -> pd.DataFrame:
+def list_datasets(cache_dir: str = cache_directory(), overwrite: bool = False, latest: bool = True) -> pd.DataFrame:
     """List all available datasets.

     Example:

     .. code-block:: python

-        datasets = list_datasets()
+        datasets = (
+            list_datasets()
+        )

     Args:
         cache_dir:
@@ -83,9 +83,7 @@ def _format_query_results(results: list, key_names: list):


 def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "meta"):
-    _all_paths = [
-        None if "/" not in p else p.rsplit("/", 1)[0] for p in results["path"]
-    ]
+    _all_paths = [None if "/" not in p else p.rsplit("/", 1)[0] for p in results["path"]]

     df = pd.DataFrame(
         {
@@ -105,33 +103,22 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met
     )
     df["title"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("title"))
     df["description"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("title"))
-    df["taxonomy_id"] = _extract_charlist_from_json(
-        _all_metas, lambda x: x.get("taxonomy_id")
-    )
+    df["taxonomy_id"] = _extract_charlist_from_json(_all_metas, lambda x: x.get("taxonomy_id"))
     df["genome"] = _extract_charlist_from_json(_all_metas, lambda x: x.get("genome"))

     df["rows"] = _extract_atomic_from_json(
         _all_metas,
-        lambda x: x.get("applications", {})
-        .get("takane", {})
-        .get("summarized_experiment", {})
-        .get("rows"),
+        lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("rows"),
     )

     df["columns"] = _extract_atomic_from_json(
         _all_metas,
-        lambda x: x.get("applications", {})
-        .get("takane", {})
-        .get("summarized_experiment", {})
-        .get("columns"),
+        lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("columns"),
     )

     df["assays"] = _extract_charlist_from_json(
         _all_metas,
-        lambda x: x.get("applications", {})
-        .get("takane", {})
-        .get("summarized_experiment", {})
-        .get("assays"),
+        lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("assays"),
     )
     df["column_annotations"] = _extract_charlist_from_json(
         _all_metas,
@@ -155,15 +142,9 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met
         .get("alternative_experiments"),
     )

-    df["bioconductor_version"] = _extract_atomic_from_json(
-        _all_metas, lambda x: x.get("bioconductor_version")
-    )
-    df["maintainer_name"] = _extract_atomic_from_json(
-        _all_metas, lambda x: x.get("maintainer_name")
-    )
-    df["maintainer_email"] = _extract_atomic_from_json(
-        _all_metas, lambda x: x.get("maintainer_email")
-    )
+    df["bioconductor_version"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("bioconductor_version"))
+    df["maintainer_name"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("maintainer_name"))
+    df["maintainer_email"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("maintainer_email"))

     sources = []
     for meta in _all_metas:
@@ -186,9 +167,7 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met


 def _extract_atomic_from_json(metadata, extract):
-    return [
-        extract(_meta) if extract(_meta) is not None else None for _meta in metadata
-    ]
+    return [extract(_meta) if extract(_meta) is not None else None for _meta in metadata]


 def _extract_charlist_from_json(metadata, extract):
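Since `list_datasets` returns a pandas DataFrame with the metadata columns assembled above (`title`, `genome`, `taxonomy_id`, and so on), the result can be filtered with ordinary pandas operations. A sketch, assuming `taxonomy_id` holds lists of NCBI IDs as the charlist extraction suggests:

```python
import scrnaseq

# @lru_cache means repeated calls in one session reuse the first result.
datasets = scrnaseq.list_datasets()

# Keep human datasets (NCBI taxonomy ID 9606).
human = datasets[datasets["taxonomy_id"].apply(lambda ids: "9606" in (ids or []))]
print(human[["title", "genome"]].head())
```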
8 changes: 6 additions & 2 deletions src/scrnaseq/list_versions.py
@@ -14,7 +14,9 @@ def list_versions(name: str) -> List[str]:
     .. code-block:: python

-        versions = list_versions("romanov-brain-2017")
+        versions = list_versions(
+            "romanov-brain-2017"
+        )

     Args:
         name:
@@ -33,7 +35,9 @@ def fetch_latest_version(name: str) -> str:
     .. code-block:: python

-        version = fetch_latest_version("romanov-brain-2017")
+        version = fetch_latest_version(
+            "romanov-brain-2017"
+        )

     Args:
         name:
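These two helpers compose naturally with `fetch_dataset`: resolve the newest version, then fetch it. A short sketch using the dataset name from the docstrings above:

```python
import scrnaseq

versions = scrnaseq.list_versions("romanov-brain-2017")
latest = scrnaseq.fetch_latest_version("romanov-brain-2017")
print(versions, "->", latest)

# Fetch the dataset at its latest available version.
sce = scrnaseq.fetch_dataset("romanov-brain-2017", latest)
```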
34 changes: 25 additions & 9 deletions src/scrnaseq/save_dataset.py
@@ -54,15 +54,27 @@ def save_dataset(x: Any, path, metadata):
         # Fetch an existing dataset
         # or create your own ``SingleCellExperiment``
         # or ``AnnData`` object.
-        sce = scrnaseq.fetch_dataset("zeisel-brain-2015", "2023-12-14")
+        sce = scrnaseq.fetch_dataset(
+            "zeisel-brain-2015",
+            "2023-12-14",
+        )

         # Provide dataset level metadata for search and findability
         meta = {
             "title": "My dataset made from ziesel brain",
             "description": "This is a copy of the ziesel",
-            "taxonomy_id": ["10090"],  # NCBI ID
-            "genome": ["GRCh38"],  # genome build
-            "sources": [{"provider": "GEO", "id": "GSE12345"}],
+            "taxonomy_id": [
+                "10090"
+            ],  # NCBI ID
+            "genome": [
+                "GRCh38"
+            ],  # genome build
+            "sources": [
+                {
+                    "provider": "GEO",
+                    "id": "GSE12345",
+                }
+            ],
             "maintainer_name": "Shizuka Mogami",
             "maintainer_email": "[email protected]",
         }
@@ -73,14 +85,18 @@ def save_dataset(x: Any, path, metadata):
         cache_dir = tempfile.mkdtemp()

         # Make sure the directory is clean
-        shutil.rmtree(cache_dir)
+        shutil.rmtree(
+            cache_dir
+        )

         # Save the dataset
-        scrnaseq.save_dataset(sce, cache_dir, meta)
+        scrnaseq.save_dataset(
+            sce,
+            cache_dir,
+            meta,
+        )
     """
-    raise NotImplementedError(
-        f"'save_dataset' is not supported for objects of class: {type(x)}"
-    )
+    raise NotImplementedError(f"'save_dataset' is not supported for objects of class: {type(x)}")


 def _save_se(x, path, metadata):
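The default body of `save_dataset` just raises `NotImplementedError`, with concrete types like `_save_se` handled separately; this is the shape of a single-dispatch generic function. A standalone sketch of that pattern (an assumption for illustration, since the hunk does not show the decorator itself):

```python
from functools import singledispatch
from typing import Any


@singledispatch
def save_dataset(x: Any, path: str, metadata: dict) -> None:
    # Default branch, mirroring the hunk above: unknown types are rejected.
    raise NotImplementedError(f"'save_dataset' is not supported for objects of class: {type(x)}")


@save_dataset.register(list)  # hypothetical type, purely for illustration
def _save_list(x: list, path: str, metadata: dict) -> None:
    print(f"would save {len(x)} records to {path} tagged {metadata.get('title')}")


save_dataset([1, 2, 3], "/tmp/demo", {"title": "toy"})  # dispatches to _save_list
```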
