Skip to content

Commit 09cebb1

Browse files
committed
Merge branch 'master' into pre-commit-ci-update-config
2 parents b1044e2 + 063db66 commit 09cebb1

10 files changed

+57
-77
lines changed

.github/workflows/pypi-publish.yml

+4-4
Original file line numberDiff line numberDiff line change
@@ -12,12 +12,12 @@ jobs:
1212
runs-on: ubuntu-latest
1313

1414
steps:
15-
- uses: actions/checkout@v2
15+
- uses: actions/checkout@v4
1616

17-
- name: Set up Python 3.9
18-
uses: actions/setup-python@v2
17+
- name: Set up Python 3.11
18+
uses: actions/setup-python@v5
1919
with:
20-
python-version: 3.9
20+
python-version: 3.11
2121

2222
# build SQLite from source, because I need 3.35<=
2323
- name: Download SQLite3

.github/workflows/pypi-test.yml

+3-3
Original file line numberDiff line numberDiff line change
@@ -11,13 +11,13 @@ jobs:
1111
runs-on: ubuntu-latest
1212
strategy:
1313
matrix:
14-
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
14+
python-version: [ "3.9", "3.10", "3.11", "3.12" ]
1515

1616
name: Python ${{ matrix.python-version }}
1717
steps:
18-
- uses: actions/checkout@v2
18+
- uses: actions/checkout@v4
1919
- name: Setup Python
20-
uses: actions/setup-python@v2
20+
uses: actions/setup-python@v5
2121
with:
2222
python-version: ${{ matrix.python-version }}
2323
cache: "pip"

.pre-commit-config.yaml

+13-12
Original file line numberDiff line numberDiff line change
@@ -17,26 +17,27 @@ repos:
1717
- id: mixed-line-ending
1818
args: ['--fix=auto'] # replace 'auto' with 'lf' to enforce Linux/Mac line endings or 'crlf' for Windows
1919

20-
- repo: https://github.com/PyCQA/docformatter
21-
rev: v1.7.5
22-
hooks:
23-
- id: docformatter
24-
additional_dependencies: [tomli]
25-
args: [--in-place, --wrap-descriptions=120, --wrap-summaries=120]
26-
# --config, ./pyproject.toml
20+
# - repo: https://github.com/PyCQA/docformatter
21+
# rev: master
22+
# hooks:
23+
# - id: docformatter
24+
# additional_dependencies: [tomli]
25+
# args: [--in-place, --wrap-descriptions=120, --wrap-summaries=120]
26+
# # --config, ./pyproject.toml
2727

28-
- repo: https://github.com/psf/black
29-
rev: 24.10.0
30-
hooks:
31-
- id: black
32-
language_version: python3
28+
# - repo: https://github.com/psf/black
29+
# rev: 24.8.0
30+
# hooks:
31+
# - id: black
32+
# language_version: python3
3333

3434
- repo: https://github.com/astral-sh/ruff-pre-commit
3535
# Ruff version.
3636
rev: v0.8.2
3737
hooks:
3838
- id: ruff
3939
args: [--fix, --exit-non-zero-on-fix]
40+
- id: ruff-format
4041

4142
## If like to embrace black styles even in the docs:
4243
# - repo: https://github.com/asottile/blacken-docs

CHANGELOG.md

+7-4
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,10 @@
11
# Changelog
22

3-
## Version 0.1 (development)
3+
## Version 0.2.0
44

5-
- Feature A added
6-
- FIX: nasty bug #1729 fixed
7-
- add your changes here!
5+
- chore: Remove Python 3.8 (EOL)
6+
- precommit: Replace docformatter with ruff's formatter
7+
8+
## Version 0.1.0
9+
10+
- Initial version of the package.

README.md

+3-3
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,10 @@
1515
# celldex - reference cell type datasets
1616

1717
This package provides reference datasets with annotated cell types for convenient use by [BiocPy](https://github.com/biocpy) packages and workflows in Python.
18-
These references were sourced and uploaded by the [celldex R/Bioconductor](https://bioconductor.org/packages/devel/data/experiment/html/celldex.html) package.
19-
20-
Each dataset is loaded as a [`SummarizedExperiment`](https://bioconductor.org/packages/SummarizedExperiment) that is ready for further analysis, and may be used for downstream analysis, e.g in the [SingleR Python implementation](https://github.com/BiocPy/singler).
18+
These references were sourced and uploaded by the [**celldex** R/Bioconductor](https://bioconductor.org/packages/celldex) package.
2119

20+
Each dataset is loaded as a [`SummarizedExperiment`](https://bioconductor.org/packages/SummarizedExperiment) that is ready for further analysis, and may be used for downstream analysis,
21+
e.g in the [SingleR Python implementation](https://github.com/SingleR-inc/singler).
2222

2323
## Installation
2424

docs/conf.py

+9
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,7 @@
7272
"sphinx.ext.ifconfig",
7373
"sphinx.ext.mathjax",
7474
"sphinx.ext.napoleon",
75+
"sphinx_autodoc_typehints",
7576
]
7677

7778
# Add any paths that contain templates here, relative to this directory.
@@ -166,6 +167,14 @@
166167
# If this is True, todo emits a warning for each TODO entries. The default is False.
167168
todo_emit_warnings = True
168169

170+
autodoc_default_options = {
171+
'special-members': True,
172+
'undoc-members': False,
173+
'exclude-members': '__weakref__, __dict__, __str__, __module__, __init__'
174+
}
175+
176+
autosummary_generate = True
177+
autosummary_imported_members = True
169178

170179
# -- Options for HTML output -------------------------------------------------
171180

setup.cfg

+3-3
Original file line numberDiff line numberDiff line change
@@ -12,10 +12,10 @@ license = MIT
1212
license_files = LICENSE.txt
1313
long_description = file: README.md
1414
long_description_content_type = text/markdown; charset=UTF-8; variant=GFM
15-
url = https://github.com/biocpy/celldex
15+
url = https://github.com/SingleR-inc/celldex
1616
# Add here related links, for example:
1717
project_urls =
18-
Documentation = https://github.com/biocpy/celldex
18+
Documentation = https://github.com/SingleR-inc/celldex
1919
# Source = https://github.com/pyscaffold/pyscaffold/
2020
# Changelog = https://pyscaffold.org/en/latest/changelog.html
2121
# Tracker = https://github.com/pyscaffold/pyscaffold/issues
@@ -41,7 +41,7 @@ package_dir =
4141
=src
4242

4343
# Require a min/specific Python version (comma-separated conditions)
44-
python_requires = >=3.8
44+
python_requires = >=3.9
4545

4646
# Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0.
4747
# Version specifiers like >=2.2,<3.0 avoid problems due to API changes in

src/celldex/fetch_reference.py

+3-9
Original file line numberDiff line numberDiff line change
@@ -81,12 +81,8 @@ def fetch_reference(
8181
or one of its subclasses.
8282
"""
8383

84-
version_path = save_version(
85-
package, name, version, cache_dir=cache_dir, overwrite=overwrite
86-
)
87-
obj_path = (
88-
version_path if path is None else os.path.join(version_path, path.rstrip("/"))
89-
)
84+
version_path = save_version(package, name, version, cache_dir=cache_dir, overwrite=overwrite)
85+
obj_path = version_path if path is None else os.path.join(version_path, path.rstrip("/"))
9086

9187
old = alt_read_object_function(celldex_load_object)
9288

@@ -147,9 +143,7 @@ def fetch_metadata(
147143
Dictionary containing metadata for the specified dataset.
148144
"""
149145
remote_path = "_bioconductor.json" if path is None else f"{path}/_bioconductor.json"
150-
local_path = save_file(
151-
package, name, version, remote_path, cache_dir=cache_dir, overwrite=overwrite
152-
)
146+
local_path = save_file(package, name, version, remote_path, cache_dir=cache_dir, overwrite=overwrite)
153147

154148
with open(local_path, "r") as f:
155149
metadata = json.load(f)

src/celldex/list_references.py

+10-33
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,7 @@
1414

1515

1616
@lru_cache
17-
def list_references(
18-
cache_dir: str = cache_directory(), overwrite: bool = False, latest: bool = True
19-
) -> pd.DataFrame:
17+
def list_references(cache_dir: str = cache_directory(), overwrite: bool = False, latest: bool = True) -> pd.DataFrame:
2018
"""List all available reference datasets.
2119
2220
Example:
@@ -83,9 +81,7 @@ def _format_query_results(results: list, key_names: list):
8381

8482

8583
def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "meta"):
86-
_all_paths = [
87-
None if "/" not in p else p.rsplit("/", 1)[0] for p in results["path"]
88-
]
84+
_all_paths = [None if "/" not in p else p.rsplit("/", 1)[0] for p in results["path"]]
8985

9086
df = pd.DataFrame(
9187
{
@@ -105,33 +101,22 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met
105101
)
106102
df["title"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("title"))
107103
df["description"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("title"))
108-
df["taxonomy_id"] = _extract_charlist_from_json(
109-
_all_metas, lambda x: x.get("taxonomy_id")
110-
)
104+
df["taxonomy_id"] = _extract_charlist_from_json(_all_metas, lambda x: x.get("taxonomy_id"))
111105
df["genome"] = _extract_charlist_from_json(_all_metas, lambda x: x.get("genome"))
112106

113107
df["rows"] = _extract_atomic_from_json(
114108
_all_metas,
115-
lambda x: x.get("applications", {})
116-
.get("takane", {})
117-
.get("summarized_experiment", {})
118-
.get("rows"),
109+
lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("rows"),
119110
)
120111

121112
df["columns"] = _extract_atomic_from_json(
122113
_all_metas,
123-
lambda x: x.get("applications", {})
124-
.get("takane", {})
125-
.get("summarized_experiment", {})
126-
.get("columns"),
114+
lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("columns"),
127115
)
128116

129117
df["assays"] = _extract_charlist_from_json(
130118
_all_metas,
131-
lambda x: x.get("applications", {})
132-
.get("takane", {})
133-
.get("summarized_experiment", {})
134-
.get("assays"),
119+
lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("assays"),
135120
)
136121
df["column_annotations"] = _extract_charlist_from_json(
137122
_all_metas,
@@ -155,15 +140,9 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met
155140
.get("alternative_experiments"),
156141
)
157142

158-
df["bioconductor_version"] = _extract_atomic_from_json(
159-
_all_metas, lambda x: x.get("bioconductor_version")
160-
)
161-
df["maintainer_name"] = _extract_atomic_from_json(
162-
_all_metas, lambda x: x.get("maintainer_name")
163-
)
164-
df["maintainer_email"] = _extract_atomic_from_json(
165-
_all_metas, lambda x: x.get("maintainer_email")
166-
)
143+
df["bioconductor_version"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("bioconductor_version"))
144+
df["maintainer_name"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("maintainer_name"))
145+
df["maintainer_email"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("maintainer_email"))
167146

168147
sources = []
169148
for meta in _all_metas:
@@ -186,9 +165,7 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met
186165

187166

188167
def _extract_atomic_from_json(metadata, extract):
189-
return [
190-
extract(_meta) if extract(_meta) is not None else None for _meta in metadata
191-
]
168+
return [extract(_meta) if extract(_meta) is not None else None for _meta in metadata]
192169

193170

194171
def _extract_charlist_from_json(metadata, extract):

src/celldex/save_reference.py

+2-6
Original file line numberDiff line numberDiff line change
@@ -91,9 +91,7 @@ def save_reference(x: Any, labels: List[str], path: str, metadata: dict):
9191
# Save the reference
9292
celldex.save_reference(sce, cache_dir, meta)
9393
"""
94-
raise NotImplementedError(
95-
f"'save_dataset' is not supported for objects of class: {type(x)}"
96-
)
94+
raise NotImplementedError(f"'save_dataset' is not supported for objects of class: {type(x)}")
9795

9896

9997
def _save_se(x: SummarizedExperiment, path, metadata):
@@ -112,9 +110,7 @@ def _save_se(x: SummarizedExperiment, path, metadata):
112110
for _cn in _cols.get_column_names():
113111
_data = _cols.get_column(_cn)
114112
if not all(isinstance(y, str) for y in _data):
115-
raise ValueError(
116-
f"All labels in 'column_data' must be a list of strings; column {_cn} does not."
117-
)
113+
raise ValueError(f"All labels in 'column_data' must be a list of strings; column {_cn} does not.")
118114

119115
if "logcounts" not in list(x.get_assay_names()):
120116
raise ValueError("Assay 'logcounts' does not exist.")

0 commit comments

Comments (0)