diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 1169808..e705eb4 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -32,8 +32,8 @@ jobs:
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
- # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
- flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+ # exit-zero treats all errors as warnings
+ flake8 . --count --exit-zero --max-complexity=10 --max-line-length=88 --statistics
- name: Test with pytest
run: |
pytest --nbval doc/tutorial.ipynb --sanitize-with doc/pytest-sanitize.ini --cov=thunor
diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml
index c73bae5..1bf20ab 100644
--- a/.github/workflows/python-publish.yml
+++ b/.github/workflows/python-publish.yml
@@ -25,10 +25,9 @@ jobs:
python-version: '3.x'
- name: Install dependencies
run: |
- python -m pip install --upgrade pip
- pip install setuptools wheel twine
+ python -m pip install --upgrade pip build
- name: Build package
run: |
- python setup.py sdist bdist_wheel
+ python -m build --sdist --wheel .
- name: Publish package distributions to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
diff --git a/doc/conf.py b/doc/conf.py
index be39a86..d42e825 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -16,8 +16,11 @@
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
+import mock
import os
import sys
+import thunor
+import pkg_resources
import datetime
sys.path.insert(0, os.path.abspath('../'))
@@ -59,8 +62,6 @@
copyright = u'2017-' + str(datetime.datetime.now().year) + u' Alex Lubbock'
author = u'Alex Lubbock'
-import thunor, pkg_resources
-
# The full version, including alpha/beta/rc tags.
release = thunor.__version__
# The short X.Y version.
@@ -152,8 +153,8 @@
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
-# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
+# the docs. This file should be a Windows icon file (.ico) being 16x16 or
+# 32x32 pixels large.
#
# html_favicon = None
@@ -328,10 +329,13 @@
'Miscellaneous'),
]
+
def setup(app):
- app.add_js_file('https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js')
+ app.add_js_file(
+ 'https://cdnjs.cloudflare.com/ajax/libs/'
+ 'require.js/2.1.10/require.min.js'
+ )
-import mock
for mod_name in ('plotly', 'plotly.graph_objs', 'tables'):
sys.modules[mod_name] = mock.MagicMock()
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..20addb0
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,31 @@
+[project]
+name = "thunor"
+dynamic = ["version", "dependencies"]
+description = "Dose response curve and drug induced proliferation (DIP) rate fits and visualisation"
+authors = [
+ {name = "Alex Lubbock", email = "code@alexlubbock.com"},
+]
+requires-python = ">=3.10"
+readme = "README.md"
+license = {text = "GPL-3.0-only"}
+classifiers = [
+ "Intended Audience :: Science/Research",
+ "Programming Language :: Python",
+ "Topic :: Scientific/Engineering :: Bio-Informatics",
+ "Topic :: Scientific/Engineering :: Chemistry",
+ "Topic :: Scientific/Engineering :: Medical Science Apps.",
+]
+
+[project.urls]
+Homepage = "https://www.thunor.net"
+
+[build-system]
+requires = ["setuptools", "versioneer-518"]
+build-backend = "setuptools.build_meta"
+
+[tool.pytest.ini_options]
+norecursedirs = "doc/_build"
+
+# NOTE(review): flake8 does not read pyproject.toml; move these settings to
+# setup.cfg or a .flake8 file for them to take effect.
+[tool.flake8]
diff --git a/setup.cfg b/setup.cfg
index f16db7a..fc194c0 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,6 +5,3 @@ versionfile_source = thunor/_version.py
versionfile_build = thunor/_version.py
tag_prefix = v
parentdir_prefix = thunor-
-
-[tool:pytest]
-norecursedirs = doc/_build
diff --git a/setup.py b/setup.py
index a97f5fb..1ad2e8e 100644
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,8 @@ def main():
author_email='code@alexlubbock.com',
url='https://www.thunor.net',
packages=['thunor', 'thunor.converters'],
- install_requires=['numpy', 'scipy', 'pandas', 'plotly', 'seaborn', 'tables'],
+ install_requires=['numpy', 'scipy', 'pandas', 'plotly', 'seaborn',
+ 'tables'],
tests_require=['pytest', 'nbval', 'django', 'nbformat'],
cmdclass=versioneer.get_cmdclass(),
zip_safe=True,
diff --git a/thunor/_version.py b/thunor/_version.py
index e7c40db..f8fa6d1 100644
--- a/thunor/_version.py
+++ b/thunor/_version.py
@@ -378,7 +378,8 @@ def git_pieces_from_vcs(
pieces["distance"] = len(out.split()) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
- date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
+ date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"],
+ cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
@@ -468,10 +469,12 @@ def render_pep440_pre(pieces: Dict[str, Any]) -> str:
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
- tag_version, post_version = pep440_split_post(pieces["closest-tag"])
+ tag_version, post_version = pep440_split_post(
+ pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
- rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
+ rendered += ".post%d.dev%d" % (post_version + 1,
+ pieces["distance"])
else:
rendered += ".post0.dev%d" % (pieces["distance"])
else:
diff --git a/thunor/converters/__init__.py b/thunor/converters/__init__.py
index a6acf29..ccc815b 100644
--- a/thunor/converters/__init__.py
+++ b/thunor/converters/__init__.py
@@ -2,5 +2,5 @@
from .ctrp2 import convert_ctrp
from .teicher import convert_teicher
-__all__ = ['convert_gdsc', 'convert_gdsc_tags', 'convert_ctrp',
- 'convert_teicher']
+__all__ = ["convert_gdsc", "convert_gdsc_tags", "convert_ctrp",
+ "convert_teicher"]
diff --git a/thunor/converters/ctrp2.py b/thunor/converters/ctrp2.py
index 8183004..07d9ba0 100644
--- a/thunor/converters/ctrp2.py
+++ b/thunor/converters/ctrp2.py
@@ -168,8 +168,8 @@ def convert_ctrp(directory='.',
python -c "from thunor.converters import convert_ctrp; convert_ctrp()"
This script will take several minutes to run, please be patient. It is also
- resource-intensive, due to the size of the dataset. We recommend you utilize
- the highest-spec machine that you have available.
+ resource-intensive, due to the size of the dataset. We recommend you
+ utilize the highest-spec machine that you have available.
This will output a file called (by default) :file:`ctrp_v2.h5`,
which can be opened with :func:`thunor.io.read_hdf()`, or used with Thunor
diff --git a/thunor/converters/gdsc.py b/thunor/converters/gdsc.py
index 000994e..0b1fd8d 100644
--- a/thunor/converters/gdsc.py
+++ b/thunor/converters/gdsc.py
@@ -69,7 +69,7 @@ def import_gdsc(drug_list_file, screen_data_file):
df = screen_data
# Drop the blank wells (no cells, no drugs)
- df.drop(list(df.filter(regex='blank\d+')), axis=1, inplace=True)
+ df.drop(list(df.filter(regex=r'blank\d+')), axis=1, inplace=True)
# Merge in the drug names
df = df.merge(drug_ids, left_on='DRUG_ID', right_index=True)
@@ -151,7 +151,8 @@ def convert_gdsc_tags(cell_line_file='Cell_Lines_Details.xlsx',
You can run this function at the command line to convert the files;
assuming the downloaded file is in the current directory, simply run::
- python -c "from thunor.converters import convert_gdsc_tags; convert_gdsc_tags()"
+ python -c "from thunor.converters import convert_gdsc_tags; \
+ convert_gdsc_tags()"
This will output a file called (by default)
:file:`gdsc_cell_line_primary_site_tags.txt`, which can be loaded into
@@ -203,8 +204,8 @@ def convert_gdsc(drug_list_file='Screened_Compounds.xlsx',
Please note that the layout of wells in each plate after conversion is
arbitrary, since this information is not in the original files.
- Please make sure you have the "tables" and "xlrd" python packages installed,
- in addition to the standard Thunor Core requirements.
+ Please make sure you have the "tables" and "xlrd" python packages
+ installed, in addition to the standard Thunor Core requirements.
You can run this function at the command line to convert the files;
assuming the two files are in the current directory, simply run::
@@ -212,8 +213,8 @@ def convert_gdsc(drug_list_file='Screened_Compounds.xlsx',
python -c "from thunor.converters import convert_gdsc; convert_gdsc()"
This script will take several minutes to run, please be patient. It is also
- resource-intensive, due to the size of the dataset. We recommend you utilize
- the highest-spec machine that you have available.
+ resource-intensive, due to the size of the dataset. We recommend you
+ utilize the highest-spec machine that you have available.
This will output a file called (by default) :file:`gdsc-v17a.h5`,
which can be opened with :func:`thunor.io.read_hdf()`, or used with Thunor
diff --git a/thunor/converters/teicher.py b/thunor/converters/teicher.py
index 1da8073..d5760b7 100644
--- a/thunor/converters/teicher.py
+++ b/thunor/converters/teicher.py
@@ -88,7 +88,8 @@ def convert_teicher(directory='.', output_file='teicher.h5'):
Unzip the downloaded file. The dataset can then be converted on the command
line::
- python -c "from thunor.converters import convert_teicher; convert_teicher()"
+ python -c "from thunor.converters import convert_teicher; \
+ convert_teicher()"
Please note that the layout of wells in each plate after conversion is
arbitrary, since this information is not in the original files.
diff --git a/thunor/curve_fit.py b/thunor/curve_fit.py
index 6c363cf..b2ff03a 100644
--- a/thunor/curve_fit.py
+++ b/thunor/curve_fit.py
@@ -507,7 +507,8 @@ def fit_drc(doses, responses, response_std_errs=None, fit_cls=HillCurveLL4,
except TypeError as te:
# This occurs if there are fewer data points than parameters
te_str = str(te)
- if 'Improper input:' in te_str or te_str.startswith('The number of func parameters'):
+ if 'Improper input:' in te_str or te_str.startswith(
+ 'The number of func parameters'):
warnings.warn(te_str)
return None
else:
@@ -950,7 +951,7 @@ def _generate_label(index):
if not is_viability and include_emax:
divisor = base_params['fit_obj'].apply(lambda fo: fo.divisor if fo
- else None)
+ else None)
base_params['emax_rel'] = base_params['emax'] / divisor
base_params['emax_obs_rel'] = base_params['emax_obs'] / divisor
@@ -1004,7 +1005,7 @@ def _attach_response_values(df_params, ctrl_dip_data, expt_dip_data,
doses_expt = [d[0] for d in dip_grp.index.get_level_values(
'dose').values]
fit_data = {'dataset_id': grp[0],
- 'cell_line': grp[1], 'drug': grp[2][0]}
+ 'cell_line': grp[1], 'drug': grp[2][0]}
ctrl_dip_data_cl = \
_get_control_responses(ctrl_dip_data, grp[0], grp[1],
diff --git a/thunor/dip.py b/thunor/dip.py
index 09daf91..56a1744 100644
--- a/thunor/dip.py
+++ b/thunor/dip.py
@@ -72,8 +72,9 @@ def dip_rates(df_data, selector_fn=tyson1):
df_assays = df_data.assays.loc[df_data.dip_assay_name]
- return ctrl_dips, \
- expt_dip_rates(df_data.doses, df_assays, selector_fn=selector_fn)
+ return ctrl_dips, expt_dip_rates(df_data.doses,
+ df_assays,
+ selector_fn=selector_fn)
def expt_dip_rates(df_doses, df_vals, selector_fn=tyson1):
diff --git a/thunor/io.py b/thunor/io.py
index 2301bcf..fcc92f2 100644
--- a/thunor/io.py
+++ b/thunor/io.py
@@ -122,9 +122,10 @@ def well_name_to_id(self, well_name, raise_error=True):
raise ValueError('Well name too short')
if len(well_name) > 2 and well_name[1].isalpha():
- row_num_mult = ord(well_name[0]) - 64 # one-based
+ row_num_mult = ord(well_name[0]) - 64 # one-based
if row_num_mult < 0 or row_num_mult > 25:
- raise ValueError('First letter is not capital alphanumeric')
+ raise ValueError(
+ 'First letter is not capital alphanumeric')
row_num = ord(well_name[1]) - 65 # zero-based
row_num += (row_num_mult * 26)
col_num_start = 2
@@ -156,7 +157,8 @@ def well_iterator(self):
-------
Iterator of dict
Iterator over the wells in the plate. Each well is given as a dict
- of 'well' (well ID), 'row' (row character) and 'col' (column number)
+ of 'well' (well ID), 'row' (row character) and 'col'
+ (column number)
"""
row_it = iter(np.repeat(list(self.row_iterator()), self.width))
col_it = itertools.cycle(self.col_iterator())
@@ -501,8 +503,7 @@ def _read_vanderbilt_hts_single_df(file_or_source, plate_width=24,
converters={
'time': _time_parser,
'well': lambda w: pm.well_name_to_id(w),
- 'expt.date': lambda
- d: datetime.strptime(
+ 'expt.date': lambda d: datetime.strptime(
d, '%Y-%m-%d').date()
},
sep=sep
@@ -517,7 +518,8 @@ def _read_vanderbilt_hts_single_df(file_or_source, plate_width=24,
elif errstr.startswith('invalid literal for int() with base 10'):
raise PlateFileParseException(
'Invalid value for cell count ({})'.format(errstr))
- elif errstr.startswith('time data') and 'does not match format' in errstr:
+ elif errstr.startswith('time data') and \
+ 'does not match format' in errstr:
raise PlateFileParseException(
'Date format should be YYYY-MM-DD ({})'.format(errstr))
else:
@@ -526,7 +528,8 @@ def _read_vanderbilt_hts_single_df(file_or_source, plate_width=24,
try:
df.set_index(['upid', 'well'], inplace=True)
except KeyError:
- raise PlateFileParseException('Please ensure columns "upid" and "well" are present')
+ raise PlateFileParseException(
+ 'Please ensure columns "upid" and "well" are present')
required_columns = {'upid', 'cell.count', 'time'}
missing_cols = required_columns.difference(set(df.columns))
@@ -644,8 +647,7 @@ def read_vanderbilt_hts(file_or_source, plate_width=24, plate_height=16,
if du != 'M':
raise PlateFileParseException(
- 'Only supported drug concentration unit is M (not {})'.
- format(du))
+ f'Only supported drug concentration unit is M (not {du})')
drug_nums.append(drug_no)
drug_no += 1
@@ -653,8 +655,8 @@ def read_vanderbilt_hts(file_or_source, plate_width=24, plate_height=16,
if drug_nums:
if 'cell.line' not in df.columns:
raise PlateFileParseException(
- 'cell.line column is not present, but drug and/or dose columns '
- 'are present. ' + ANNOTATION_MSG
+ 'cell.line column is not present, but drug and/or dose '
+ 'columns are present. ' + ANNOTATION_MSG
)
else:
if 'cell.line' in df.columns:
@@ -690,8 +692,8 @@ def read_vanderbilt_hts(file_or_source, plate_width=24, plate_height=16,
# Check for duplicate time point definitions
dup_timepoints = df.set_index('time', append=True)
if dup_timepoints.index.duplicated().any():
- dups = dup_timepoints.loc[dup_timepoints.index.duplicated(),
- :].index.tolist()
+ dups = dup_timepoints.loc[
+ dup_timepoints.index.duplicated(), :].index.tolist()
n_dups = len(dups)
first_dup = dups[0]
@@ -723,7 +725,7 @@ def read_vanderbilt_hts(file_or_source, plate_width=24, plate_height=16,
zip(df_doses["upid"], df_doses["well"])]))
df_doses = df_doses.drop_duplicates(subset='well')
col_renames = {'drug{}.conc'.format(n): 'dose{}'.format(n) for
- n in drug_nums}
+ n in drug_nums}
col_renames.update({
'cell.line': 'cell_line',
'well': 'well_id',
@@ -1006,7 +1008,7 @@ def read_incucyte(filename_or_buffer, plate_width=24, plate_height=16):
elif hasattr(filename_or_buffer, 'name'):
plate_name = filename_or_buffer.name
- def _incucyte_header(filedat):
+ def _incucyte_header(filedat, plate_name, cell_type):
for line_no, line in enumerate(filedat):
if line.startswith(LABEL_STR):
new_plate_name = line[len(LABEL_STR):].strip()
@@ -1015,18 +1017,20 @@ def _incucyte_header(filedat):
elif line.startswith(CELL_TYPE_STR):
cell_type = line[len(CELL_TYPE_STR):].strip()
elif line.startswith(TSV_START_STR):
- return line_no
- return None
+ return line_no, plate_name, cell_type
+ return None, plate_name, cell_type
if isinstance(filename_or_buffer, io.BytesIO):
filedat = io.TextIOWrapper(filename_or_buffer,
encoding='utf-8')
- line_no = _incucyte_header(filedat)
+ line_no, plate_name, cell_type = _incucyte_header(
+ filedat, plate_name, cell_type)
filedat.detach()
filename_or_buffer.seek(0)
else:
with open(filename_or_buffer, 'r') as f:
- line_no = _incucyte_header(f)
+ line_no, plate_name, cell_type = _incucyte_header(
+ f, plate_name, cell_type)
if line_no is None:
raise PlateFileParseException('Does not appear to be an Incucyte '
diff --git a/thunor/plots.py b/thunor/plots.py
index 99a6bcb..f838465 100644
--- a/thunor/plots.py
+++ b/thunor/plots.py
@@ -95,8 +95,8 @@ def _param_na_first(param_id):
param_id = _remove_drmetric_prefix(param_id)
# Which is first for E, Emax, Erel, AA and Hill
return param_id in ('hill', 'aa', 'emax', 'emax_rel', 'einf') \
- or E_REGEX.match(param_id) \
- or E_REL_REGEX.match(param_id)
+ or E_REGEX.match(param_id) \
+ or E_REL_REGEX.match(param_id)
def _get_param_name(param_id):
@@ -366,8 +366,8 @@ def plot_drc(fit_params, is_absolute=False, color_by=None, color_groups=None,
'width': 3},
hoverinfo=hoverinfo,
legendgroup=legend_grp,
- showlegend=not show_replicates or
- multi_dataset,
+ showlegend=(not show_replicates or
+ multi_dataset),
visible=visible,
name=group_name_disp)
)
@@ -478,13 +478,14 @@ def plot_drc(fit_params, is_absolute=False, color_by=None, color_groups=None,
layout = go.Layout(title=title,
hovermode='closest' if show_replicates
or len(traces) > 50 else 'x',
- xaxis={'title': 'Dose (M)',
- 'range': (xaxis_min, xaxis_max),
- 'type': 'log'},
- yaxis={'title': yaxis_title,
- 'range': yaxis_range,
- 'rangemode': yaxis_rangemode
- },
+ xaxis={
+ 'title': 'Dose (M)',
+ 'range': (xaxis_min, xaxis_max),
+ 'type': 'log'},
+ yaxis={
+ 'title': yaxis_title,
+ 'range': yaxis_range,
+ 'rangemode': yaxis_rangemode},
annotations=annotations,
template=template
)
@@ -595,7 +596,7 @@ def plot_drug_combination_heatmap(
def _symbols_hovertext_two_dataset_scatter(df_params, range_bounded_params,
fit_param, dataset_names):
symbols = ['circle'] * len(df_params.index)
- hovertext = [" ".join(l) for l in df_params.index.values]
+ hovertext = [" ".join(lbl) for lbl in df_params.index.values]
for param in range_bounded_params:
msg = _out_of_range_msg(param)
for i in (0, 1):
@@ -751,9 +752,10 @@ def plot_two_dataset_param_scatter(df_params, fit_param, title, subtitle,
line=dict(
color="darkorange"
),
- name='{} vs {} {} Linear Fit'.format(dataset_names[0],
- dataset_names[1],
- param_name),
+ name='{} vs {} {} Linear Fit'.format(
+ dataset_names[0],
+ dataset_names[1],
+ param_name),
showlegend=False
))
layout['annotations'] = [{
@@ -775,8 +777,8 @@ def plot_two_dataset_param_scatter(df_params, fit_param, title, subtitle,
dat = df_params[df_params.index.get_level_values(
'cell_line' if color_by == 'cl' else 'drug').isin(
color_groups[tag_name])]
- symbols, hovertext = _symbols_hovertext_two_dataset_scatter(dat, range_bounded_params,
- fit_param, dataset_names)
+ symbols, hovertext = _symbols_hovertext_two_dataset_scatter(
+ dat, range_bounded_params, fit_param, dataset_names)
fit_param_data = dat.loc[:, fit_param]
xdat = fit_param_data.iloc[:, 0]
@@ -912,7 +914,9 @@ def plot_drc_params(df_params, fit_param,
if multi_dataset and not color_by:
color_by_col = 'dataset_id'
color_by = 'dataset'
- color_groups = {dataset: [dataset] for dataset in df_params.index.get_level_values('dataset_id').unique()}
+ color_groups = {dataset: [dataset]
+ for dataset in df_params.index.get_level_values(
+ 'dataset_id').unique()}
colours = _sns_to_rgb(sns.color_palette("husl", 2))
elif color_by:
color_by_col = 'cell_line' if color_by == 'cl' else 'drug'
@@ -1052,7 +1056,8 @@ def plot_drc_params(df_params, fit_param,
if color_by:
for idx, tag_name in enumerate(color_groups):
- location = df_params.index.get_level_values(color_by_col).isin(color_groups[tag_name])
+ location = df_params.index.get_level_values(
+ color_by_col).isin(color_groups[tag_name])
dat = df_params[location]
symbols, hovertext = _symbols_hovertext_two_param_scatter(
dat, range_bounded_params)
@@ -1215,8 +1220,7 @@ def plot_drc_params(df_params, fit_param,
'yanchor': 'bottom',
'yref': 'paper', 'showarrow': False,
'text': 'Two-sided Mann-Whitney U: {:.4g} '
- 'p-value: {:.4g}'.format(
- mw_u, mw_p)
+ 'p-value: {:.4g}'.format(mw_u, mw_p)
})
layout['annotations'].extend([
@@ -1401,7 +1405,8 @@ def _aggregate_by_tag(yvals, aggregate_items, label_type,
label_type_tag = label_type + '_tag'
for tag_name, names in aggregate_items.items():
- yvals_tmp = yvals.loc[yvals.index.isin(names, level=label_type), :].copy()
+ yvals_tmp = yvals.loc[yvals.index.isin(
+ names, level=label_type), :].copy()
# Add counts to the tag names
if add_counts:
@@ -1414,7 +1419,7 @@ def _aggregate_by_tag(yvals, aggregate_items, label_type,
new = pd.concat(df_list)
labels = list(new.index.names)
- new.reset_index([l for l in labels if l != label_type], inplace=True)
+ new.reset_index([lbl for lbl in labels if lbl != label_type], inplace=True)
labels[labels.index(label_type)] = label_type_tag
new.set_index(labels, inplace=True, drop=replace_index)
if replace_index:
@@ -1517,7 +1522,8 @@ def plot_time_course(hts_pandas,
if show_dip_fit:
if df_controls is not None:
dip_rate_ctrl = ctrl_dip_rates(df_controls)
- dip_rate_ctrl.index = dip_rate_ctrl.index.droplevel(level='cell_line')
+ dip_rate_ctrl.index = dip_rate_ctrl.index.droplevel(
+ level='cell_line')
dip_rates = expt_dip_rates(df_doses, df_vals)
dip_rates.reset_index(inplace=True)
dip_rates.set_index('well_id', inplace=True)
@@ -1719,7 +1725,11 @@ def plot_ctrl_cell_counts_by_plate(df_controls, title=None, subtitle=None,
# Sort by median DIP rate
df_controls = df_controls.copy()
- df_controls = df_controls['value'].groupby(level=['cell_line', 'plate']).apply(lambda x: x.quantile(q=(0, 0.25, 0.25, 0.5, 0.75, 0.75, 1))).reset_index(level=2, drop=True).to_frame()
+ df_controls = df_controls['value'].groupby(
+ level=['cell_line', 'plate']).apply(
+ lambda x: x.quantile(
+ q=(0, 0.25, 0.25, 0.5, 0.75, 0.75, 1))
+ ).reset_index(level=2, drop=True).to_frame()
df_controls['cl_median'] = df_controls['value'].groupby(
level=['cell_line']).transform(np.nanmedian)
@@ -1834,22 +1844,25 @@ def plot_plate_map(plate_data, color_by='dip_rates',
y=[rows - (well_num // cols) + well_rad for well_num in
range(num_wells)],
text='',
- hovertext=['Well {}{}<br>'
- 'DIP: {}<br>'
- 'Cell Line: {}<br>'
- 'Drug: {}<br>'
- 'Dose: {}'.format(
- row_labels[well_num // cols],
- col_labels[well_num % cols],
- plate_data.dip_rates[well_num],
- plate_data.cell_lines[well_num],
- " & ".join(["(None)" if d is None else d for d in
- plate_data.drugs[well_num]]) if
- plate_data.drugs[well_num] else 'None',
- " & ".join([format_dose(d) for d in
- plate_data.doses[well_num]]) if
- plate_data.doses[well_num] else 'N/A'
- )
+ hovertext=[
+ 'Well {}{}<br>'
+ 'DIP: {}<br>'
+ 'Cell Line: {}<br>'
+ 'Drug: {}<br>'
+ 'Dose: {}'.format(
+ row_labels[well_num // cols],
+ col_labels[well_num % cols],
+ plate_data.dip_rates[well_num],
+ plate_data.cell_lines[well_num],
+ " & ".join(
+ ["(None)" if d is None else d for d in
+ plate_data.drugs[well_num]])
+ if plate_data.drugs[well_num] else 'None',
+ " & ".join(
+ [format_dose(d) for d in
+ plate_data.doses[well_num]])
+ if plate_data.doses[well_num] else 'N/A'
+ )
for well_num in range(num_wells)],
hoverinfo='text',
mode='text'
diff --git a/thunor/tests/test_curve_fit.py b/thunor/tests/test_curve_fit.py
index 70addf7..843cf57 100644
--- a/thunor/tests/test_curve_fit.py
+++ b/thunor/tests/test_curve_fit.py
@@ -1,5 +1,4 @@
from thunor.curve_fit import fit_drc, HillCurveLL4
-from numpy.testing import assert_raises
def test_fit_drc_3_data_points():
diff --git a/thunor/tests/test_io.py b/thunor/tests/test_io.py
index e9fe958..57b14ef 100644
--- a/thunor/tests/test_io.py
+++ b/thunor/tests/test_io.py
@@ -125,7 +125,8 @@ def test_csv_two_drugs_drug2_blank(self):
def test_read_incucyte():
- ref = importlib.resources.files('thunor') / 'testdata/test_incucyte_minimal.txt'
+ ref = importlib.resources.files('thunor') / \
+ 'testdata/test_incucyte_minimal.txt'
with importlib.resources.as_file(ref) as filename:
thunor.io.read_incucyte(filename)
diff --git a/thunor/tests/test_plots.py b/thunor/tests/test_plots.py
index 3bef8af..b9339fe 100644
--- a/thunor/tests/test_plots.py
+++ b/thunor/tests/test_plots.py
@@ -78,7 +78,7 @@ def test_plot_two_params(self):
fit_param_sort='ec25')
assert isinstance(plotly_to_dataframe(x),
- pd.DataFrame)
+ pd.DataFrame)
def test_plot_dip_params_aggregation(self):
assert isinstance(plotly_to_dataframe(plot_drc_params(
diff --git a/versioneer.py b/versioneer.py
index 1e3753e..6552c5c 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -1,4 +1,3 @@
-
# Version: 0.29
"""The Versioneer - like a rocketeer, but for versions.
@@ -23,15 +22,17 @@
## Quick Install
-Versioneer provides two installation modes. The "classic" vendored mode installs
-a copy of versioneer into your repository. The experimental build-time dependency mode
-is intended to allow you to skip this step and simplify the process of upgrading.
+Versioneer provides two installation modes. The "classic" vendored mode
+installs a copy of versioneer into your repository. The experimental build-time
+dependency mode is intended to allow you to skip this step and simplify the
+process of upgrading.
### Vendored mode
* `pip install versioneer` to somewhere in your $PATH
- * A [conda-forge recipe](https://github.com/conda-forge/versioneer-feedstock) is
- available, so you can also use `conda install -c conda-forge versioneer`
+ * A [conda-forge recipe]
+ (https://github.com/conda-forge/versioneer-feedstock)
+ is available, so you can also use `conda install -c conda-forge versioneer`
* add a `[tool.versioneer]` section to your `pyproject.toml` or a
`[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md))
* Note that you will need to add `tomli; python_version < "3.11"` to your
@@ -42,7 +43,8 @@
### Build-time dependency mode
* `pip install versioneer` to somewhere in your $PATH
- * A [conda-forge recipe](https://github.com/conda-forge/versioneer-feedstock) is
+ * A [conda-forge recipe]
+ (https://github.com/conda-forge/versioneer-feedstock) is
available, so you can also use `conda install -c conda-forge versioneer`
* add a `[tool.versioneer]` section to your `pyproject.toml` or a
`[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md))
@@ -219,10 +221,10 @@
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
-[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
-this issue. The discussion in
-[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
-issue from the Versioneer side in more detail.
+[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is
+tracking this issue. The discussion in
+[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61)
+describes the issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
@@ -249,9 +251,9 @@
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
-[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
-this one, but upgrading to a newer version of setuptools should probably
-resolve it.
+[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83)
+describes this one, but upgrading to a newer version of setuptools should
+probably resolve it.
## Updating Versioneer
@@ -281,12 +283,12 @@
## Similar projects
-* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
- dependency
-* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
- versioneer
-* [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools
- plugin
+* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a
+ non-vendored build-time dependency
+* [minver](https://github.com/jbweston/miniver) - a lightweight
+ reimplementation of versioneer
+* [versioningit](https://github.com/jwodder/versioningit) - a PEP
+ 518-based setuptools plugin
## License
@@ -367,11 +369,13 @@ def get_root() -> str:
or os.path.exists(pyproject_toml)
or os.path.exists(versioneer_py)
):
- err = ("Versioneer was unable to run the project root directory. "
- "Versioneer requires setup.py to be executed from "
- "its immediate directory (like 'python setup.py COMMAND'), "
- "or in a way that lets it use sys.argv[0] to find the root "
- "(like 'python path/to/setup.py COMMAND').")
+ err = (
+ "Versioneer was unable to run the project root directory. "
+ "Versioneer requires setup.py to be executed from "
+ "its immediate directory (like 'python setup.py COMMAND'), "
+ "or in a way that lets it use sys.argv[0] to find the root "
+ "(like 'python path/to/setup.py COMMAND')."
+ )
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
@@ -384,8 +388,10 @@ def get_root() -> str:
me_dir = os.path.normcase(os.path.splitext(my_path)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir and "VERSIONEER_PEP518" not in globals():
- print("Warning: build in %s is using versioneer.py from %s"
- % (os.path.dirname(my_path), versioneer_py))
+ print(
+ "Warning: build in %s is using versioneer.py from %s"
+ % (os.path.dirname(my_path), versioneer_py)
+ )
except NameError:
pass
return root
@@ -403,9 +409,9 @@ def get_config_from_root(root: str) -> VersioneerConfig:
section: Union[Dict[str, Any], configparser.SectionProxy, None] = None
if pyproject_toml.exists() and have_tomllib:
try:
- with open(pyproject_toml, 'rb') as fobj:
+ with open(pyproject_toml, "rb") as fobj:
pp = tomllib.load(fobj)
- section = pp['tool']['versioneer']
+ section = pp["tool"]["versioneer"]
except (tomllib.TOMLDecodeError, KeyError) as e:
print(f"Failed to load config from {pyproject_toml}: {e}")
print("Try to load it from setup.cfg")
@@ -422,7 +428,7 @@ def get_config_from_root(root: str) -> VersioneerConfig:
# `None` values elsewhere where it matters
cfg = VersioneerConfig()
- cfg.VCS = section['VCS']
+ cfg.VCS = section["VCS"]
cfg.style = section.get("style", "")
cfg.versionfile_source = cast(str, section.get("versionfile_source"))
cfg.versionfile_build = section.get("versionfile_build")
@@ -450,10 +456,12 @@ class NotThisMethod(Exception):
def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator
"""Create decorator to mark a method as the handler of a VCS."""
+
def decorate(f: Callable) -> Callable:
"""Store f in HANDLERS[vcs][method]."""
HANDLERS.setdefault(vcs, {})[method] = f
return f
+
return decorate
@@ -480,10 +488,14 @@ def run_command(
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
- process = subprocess.Popen([command] + args, cwd=cwd, env=env,
- stdout=subprocess.PIPE,
- stderr=(subprocess.PIPE if hide_stderr
- else None), **popen_kwargs)
+ process = subprocess.Popen(
+ [command] + args,
+ cwd=cwd,
+ env=env,
+ stdout=subprocess.PIPE,
+ stderr=(subprocess.PIPE if hide_stderr else None),
+ **popen_kwargs,
+ )
break
except OSError as e:
if e.errno == errno.ENOENT:
@@ -505,7 +517,9 @@ def run_command(
return stdout, process.returncode
-LONG_VERSION_PY['git'] = r'''
+LONG_VERSION_PY[
+ "git"
+] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
@@ -885,7 +899,8 @@ def git_pieces_from_vcs(
pieces["distance"] = len(out.split()) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
- date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip()
+ date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root
+ )[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
@@ -975,10 +990,12 @@ def render_pep440_pre(pieces: Dict[str, Any]) -> str:
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
- tag_version, post_version = pep440_split_post(pieces["closest-tag"])
+ tag_version, post_version = pep440_split_post(
+ pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
- rendered += ".post%%d.dev%%d" %% (post_version + 1, pieces["distance"])
+ rendered += ".post%%d.dev%%d" %% (post_version + 1,
+ pieces["distance"])
else:
rendered += ".post0.dev%%d" %% (pieces["distance"])
else:
@@ -1259,7 +1276,7 @@ def git_versions_from_keywords(
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
- tags = {r for r in refs if re.search(r'\d', r)}
+ tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
@@ -1271,28 +1288,32 @@ def git_versions_from_keywords(
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
- if not re.match(r'\d', r):
+ if not re.match(r"\d", r):
continue
if verbose:
print("picking %s" % r)
- return {"version": r,
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": None,
- "date": date}
+ return {
+ "version": r,
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False,
+ "error": None,
+ "date": date,
+ }
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
- return {"version": "0+unknown",
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": "no suitable tags", "date": None}
+ return {
+ "version": "0+unknown",
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False,
+ "error": "no suitable tags",
+ "date": None,
+ }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(
- tag_prefix: str,
- root: str,
- verbose: bool,
- runner: Callable = run_command
+ tag_prefix: str, root: str, verbose: bool, runner: Callable = run_command
) -> Dict[str, Any]:
"""Get version from 'git describe' in the root of the source tree.
@@ -1320,10 +1341,19 @@ def git_pieces_from_vcs(
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
- describe_out, rc = runner(GITS, [
- "describe", "--tags", "--dirty", "--always", "--long",
- "--match", f"{tag_prefix}[[:digit:]]*"
- ], cwd=root)
+ describe_out, rc = runner(
+ GITS,
+ [
+ "describe",
+ "--tags",
+ "--dirty",
+ "--always",
+ "--long",
+ "--match",
+ f"{tag_prefix}[[:digit:]]*",
+ ],
+ cwd=root,
+ )
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
@@ -1379,17 +1409,17 @@ def git_pieces_from_vcs(
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
- git_describe = git_describe[:git_describe.rindex("-dirty")]
+ git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
- mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
+ mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
- pieces["error"] = ("unable to parse git-describe output: '%s'"
- % describe_out)
+ pieces["error"] = "unable to parse git-describe output: '%s'" % \
+ describe_out
return pieces
# tag
@@ -1398,8 +1428,10 @@ def git_pieces_from_vcs(
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
- pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
- % (full_tag, tag_prefix))
+ pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
+ full_tag,
+ tag_prefix,
+ )
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
@@ -1416,7 +1448,8 @@ def git_pieces_from_vcs(
pieces["distance"] = len(out.split()) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
- date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
+ date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root
+ )[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
@@ -1479,15 +1512,21 @@ def versions_from_parentdir(
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
- return {"version": dirname[len(parentdir_prefix):],
- "full-revisionid": None,
- "dirty": False, "error": None, "date": None}
+ return {
+ "version": dirname[len(parentdir_prefix):],
+ "full-revisionid": None,
+ "dirty": False,
+ "error": None,
+ "date": None,
+ }
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
- print("Tried directories %s but none started with prefix %s" %
- (str(rootdirs), parentdir_prefix))
+ print(
+ "Tried directories %s but none started with prefix %s"
+ % (str(rootdirs), parentdir_prefix)
+ )
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@@ -1516,11 +1555,17 @@ def versions_from_file(filename: str) -> Dict[str, Any]:
contents = f.read()
except OSError:
raise NotThisMethod("unable to read _version.py")
- mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
- contents, re.M | re.S)
+ mo = re.search(
+ r"version_json = '''\n(.*)''' # END VERSION_JSON",
+ contents,
+ re.M | re.S
+ )
if not mo:
- mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
- contents, re.M | re.S)
+ mo = re.search(
+ r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
+ contents,
+ re.M | re.S
+ )
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
@@ -1528,8 +1573,8 @@ def versions_from_file(filename: str) -> Dict[str, Any]:
def write_to_version_file(filename: str, versions: Dict[str, Any]) -> None:
"""Write the given version number to the given _version.py file."""
- contents = json.dumps(versions, sort_keys=True,
- indent=1, separators=(",", ": "))
+ contents = json.dumps(versions, sort_keys=True, indent=1,
+ separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
@@ -1561,8 +1606,7 @@ def render_pep440(pieces: Dict[str, Any]) -> str:
rendered += ".dirty"
else:
# exception #1
- rendered = "0+untagged.%d.g%s" % (pieces["distance"],
- pieces["short"])
+ rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
@@ -1591,8 +1635,7 @@ def render_pep440_branch(pieces: Dict[str, Any]) -> str:
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
- rendered += "+untagged.%d.g%s" % (pieces["distance"],
- pieces["short"])
+ rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
@@ -1617,10 +1660,12 @@ def render_pep440_pre(pieces: Dict[str, Any]) -> str:
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
- tag_version, post_version = pep440_split_post(pieces["closest-tag"])
+ tag_version, post_version = pep440_split_post(
+ pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
- rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
+ rendered += ".post%d.dev%d" % (post_version + 1,
+ pieces["distance"])
else:
rendered += ".post0.dev%d" % (pieces["distance"])
else:
@@ -1753,11 +1798,13 @@ def render_git_describe_long(pieces: Dict[str, Any]) -> str:
def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]:
"""Render the given version pieces into the requested style."""
if pieces["error"]:
- return {"version": "unknown",
- "full-revisionid": pieces.get("long"),
- "dirty": None,
- "error": pieces["error"],
- "date": None}
+ return {
+ "version": "unknown",
+ "full-revisionid": pieces.get("long"),
+ "dirty": None,
+ "error": pieces["error"],
+ "date": None,
+ }
if not style or style == "default":
style = "pep440" # the default
@@ -1781,9 +1828,13 @@ def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]:
else:
raise ValueError("unknown style '%s'" % style)
- return {"version": rendered, "full-revisionid": pieces["long"],
- "dirty": pieces["dirty"], "error": None,
- "date": pieces.get("date")}
+ return {
+ "version": rendered,
+ "full-revisionid": pieces["long"],
+ "dirty": pieces["dirty"],
+ "error": None,
+ "date": pieces.get("date"),
+ }
class VersioneerBadRootError(Exception):
@@ -1806,8 +1857,9 @@ def get_versions(verbose: bool = False) -> Dict[str, Any]:
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or bool(cfg.verbose) # `bool()` used to avoid `None`
- assert cfg.versionfile_source is not None, \
- "please set versioneer.versionfile_source"
+ assert (
+ cfg.versionfile_source is not None
+ ), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
@@ -1861,9 +1913,13 @@ def get_versions(verbose: bool = False) -> Dict[str, Any]:
if verbose:
print("unable to compute version")
- return {"version": "0+unknown", "full-revisionid": None,
- "dirty": None, "error": "unable to compute version",
- "date": None}
+ return {
+ "version": "0+unknown",
+ "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to compute version",
+ "date": None,
+ }
def get_version() -> str:
@@ -1890,7 +1946,8 @@ def get_cmdclass(cmdclass: Optional[Dict[str, Any]] = None):
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
- # Also see https://github.com/python-versioneer/python-versioneer/issues/52
+ # Also see
+ # https://github.com/python-versioneer/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
@@ -1916,6 +1973,7 @@ def run(self) -> None:
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
+
cmds["version"] = cmd_version
# we override "build_py" in setuptools
@@ -1937,8 +1995,8 @@ def run(self) -> None:
# but the build_py command is not expected to copy any files.
# we override different "build_py" commands for both environments
- if 'build_py' in cmds:
- _build_py: Any = cmds['build_py']
+ if "build_py" in cmds:
+ _build_py: Any = cmds["build_py"]
else:
from setuptools.command.build_py import build_py as _build_py
@@ -1959,10 +2017,11 @@ def run(self) -> None:
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
+
cmds["build_py"] = cmd_build_py
- if 'build_ext' in cmds:
- _build_ext: Any = cmds['build_ext']
+ if "build_ext" in cmds:
+ _build_ext: Any = cmds["build_ext"]
else:
from setuptools.command.build_ext import build_ext as _build_ext
@@ -1985,16 +2044,20 @@ def run(self) -> None:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
if not os.path.exists(target_versionfile):
- print(f"Warning: {target_versionfile} does not exist, skipping "
- "version update. This can happen if you are running build_ext "
- "without first running build_py.")
+ print(
+ f"Warning: {target_versionfile} does not exist, skipping "
+ "version update. This can happen if you are running "
+ "build_ext without first running build_py."
+ )
return
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
+
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe # type: ignore
+
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
@@ -2015,21 +2078,27 @@ def run(self) -> None:
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
- f.write(LONG %
- {"DOLLAR": "$",
- "STYLE": cfg.style,
- "TAG_PREFIX": cfg.tag_prefix,
- "PARENTDIR_PREFIX": cfg.parentdir_prefix,
- "VERSIONFILE_SOURCE": cfg.versionfile_source,
- })
+ f.write(
+ LONG
+ % {
+ "DOLLAR": "$",
+ "STYLE": cfg.style,
+ "TAG_PREFIX": cfg.tag_prefix,
+ "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+ "VERSIONFILE_SOURCE": cfg.versionfile_source,
+ }
+ )
+
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
- if 'py2exe' in sys.modules: # py2exe enabled?
+ if "py2exe" in sys.modules: # py2exe enabled?
try:
- from py2exe.setuptools_buildexe import py2exe as _py2exe # type: ignore
+ from py2exe.setuptools_buildexe import ( # type: ignore
+ py2exe as _py2exe)
except ImportError:
- from py2exe.distutils_buildexe import py2exe as _py2exe # type: ignore
+ from py2exe.distutils_buildexe import ( # type: ignore
+ py2exe as _py2exe)
class cmd_py2exe(_py2exe):
def run(self) -> None:
@@ -2044,18 +2113,22 @@ def run(self) -> None:
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
- f.write(LONG %
- {"DOLLAR": "$",
- "STYLE": cfg.style,
- "TAG_PREFIX": cfg.tag_prefix,
- "PARENTDIR_PREFIX": cfg.parentdir_prefix,
- "VERSIONFILE_SOURCE": cfg.versionfile_source,
- })
+ f.write(
+ LONG
+ % {
+ "DOLLAR": "$",
+ "STYLE": cfg.style,
+ "TAG_PREFIX": cfg.tag_prefix,
+ "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+ "VERSIONFILE_SOURCE": cfg.versionfile_source,
+ }
+ )
+
cmds["py2exe"] = cmd_py2exe
# sdist farms its file list building out to egg_info
- if 'egg_info' in cmds:
- _egg_info: Any = cmds['egg_info']
+ if "egg_info" in cmds:
+ _egg_info: Any = cmds["egg_info"]
else:
from setuptools.command.egg_info import egg_info as _egg_info
@@ -2068,7 +2141,7 @@ def find_sources(self) -> None:
# Modify the filelist and normalize it
root = get_root()
cfg = get_config_from_root(root)
- self.filelist.append('versioneer.py')
+ self.filelist.append("versioneer.py")
if cfg.versionfile_source:
# There are rare cases where versionfile_source might not be
# included by default, so we must be explicit
@@ -2081,18 +2154,21 @@ def find_sources(self) -> None:
# We will instead replicate their final normalization (to unicode,
# and POSIX-style paths)
from setuptools import unicode_utils
- normalized = [unicode_utils.filesys_decode(f).replace(os.sep, '/')
- for f in self.filelist.files]
- manifest_filename = os.path.join(self.egg_info, 'SOURCES.txt')
- with open(manifest_filename, 'w') as fobj:
- fobj.write('\n'.join(normalized))
+ normalized = [
+ unicode_utils.filesys_decode(f).replace(os.sep, "/")
+ for f in self.filelist.files
+ ]
+
+ manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
+ with open(manifest_filename, "w") as fobj:
+ fobj.write("\n".join(normalized))
- cmds['egg_info'] = cmd_egg_info
+ cmds["egg_info"] = cmd_egg_info
# we override different "sdist" commands for both environments
- if 'sdist' in cmds:
- _sdist: Any = cmds['sdist']
+ if "sdist" in cmds:
+ _sdist: Any = cmds["sdist"]
else:
from setuptools.command.sdist import sdist as _sdist
@@ -2114,8 +2190,10 @@ def make_release_tree(self, base_dir: str, files: List[str]) -> None:
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
- write_to_version_file(target_versionfile,
- self._versioneer_generated_versions)
+ write_to_version_file(
+ target_versionfile, self._versioneer_generated_versions
+ )
+
cmds["sdist"] = cmd_sdist
return cmds
@@ -2175,8 +2253,8 @@ def do_setup() -> int:
root = get_root()
try:
cfg = get_config_from_root(root)
- except (OSError, configparser.NoSectionError,
- configparser.NoOptionError) as e:
+ except (OSError, configparser.NoSectionError, configparser.NoOptionError
+ ) as e:
if isinstance(e, (OSError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
@@ -2188,15 +2266,18 @@ def do_setup() -> int:
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
- f.write(LONG % {"DOLLAR": "$",
- "STYLE": cfg.style,
- "TAG_PREFIX": cfg.tag_prefix,
- "PARENTDIR_PREFIX": cfg.parentdir_prefix,
- "VERSIONFILE_SOURCE": cfg.versionfile_source,
- })
-
- ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
- "__init__.py")
+ f.write(
+ LONG
+ % {
+ "DOLLAR": "$",
+ "STYLE": cfg.style,
+ "TAG_PREFIX": cfg.tag_prefix,
+ "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+ "VERSIONFILE_SOURCE": cfg.versionfile_source,
+ }
+ )
+
+ ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
maybe_ipy: Optional[str] = ipy
if os.path.exists(ipy):
try: