From a709864c4e4093a9b61272e9d9a6f17cec2e620e Mon Sep 17 00:00:00 2001 From: Stefan Jansen Date: Thu, 12 Jan 2023 11:28:16 -0500 Subject: [PATCH 1/4] numerous changes --- .flake8 | 13 + .github/workflows/unit_tests.yml | 2 +- .pre-commit-config.yaml | 3 +- pyfolio/_version.py | 489 ----- pyfolio/interesting_periods.py | 75 - pyproject.toml | 154 +- setup.cfg | 92 - setup.py | 21 - {pyfolio => src/pyfolio}/__init__.py | 24 +- src/pyfolio/_version.py | 4 + {pyfolio => src/pyfolio}/capacity.py | 23 +- {pyfolio => src/pyfolio}/deprecate.py | 0 .../pyfolio}/examples/results.pickle | Bin .../round_trip_tear_sheet_example.ipynb | 0 .../examples/sector_mappings_example.ipynb | 2 +- .../examples/single_stock_example.ipynb | 2 +- .../pyfolio}/examples/slippage_example.ipynb | 0 .../examples/zipline_algo_example.ipynb | 2 +- src/pyfolio/interesting_periods.py | 135 ++ {pyfolio => src/pyfolio}/ipycompat.py | 0 {pyfolio => src/pyfolio}/perf_attrib.py | 28 +- {pyfolio => src/pyfolio}/plotting.py | 110 +- {pyfolio => src/pyfolio}/pos.py | 0 {pyfolio => src/pyfolio}/round_trips.py | 40 +- {pyfolio => src/pyfolio}/tears.py | 54 +- {pyfolio => src/pyfolio}/timeseries.py | 61 +- {pyfolio => src/pyfolio}/txn.py | 4 +- {pyfolio => src/pyfolio}/utils.py | 22 +- {pyfolio/tests => tests}/__init__.py | 0 {pyfolio/tests => tests}/matplotlibrc | 0 {pyfolio/tests => tests}/test_capacity.py | 24 +- .../test_data/factor_loadings.csv | 0 .../test_data/factor_returns.csv | 0 .../tests => tests}/test_data/intercepts.csv | 0 .../tests => tests}/test_data/positions.csv | 0 .../tests => tests}/test_data/residuals.csv | 0 .../tests => tests}/test_data/returns.csv | 0 .../tests => tests}/test_data/test_LMCAP.csv | 0 .../test_data/test_LT_MOMENTUM.csv | 0 .../test_data/test_MACDSignal.csv | 0 .../tests => tests}/test_data/test_VLTY.csv | 0 .../tests => tests}/test_data/test_caps.csv | 0 .../test_data/test_gross_lev.csv.gz | Bin .../tests => tests}/test_data/test_pos.csv.gz | Bin .../test_data/test_returns.csv.gz | Bin .../test_data/test_sectors.csv | 0 .../test_data/test_shares_held.csv | 0 .../tests => tests}/test_data/test_txn.csv.gz | Bin .../test_data/test_volumes.csv | 0 {pyfolio/tests => tests}/test_perf_attrib.py | 92 +- {pyfolio/tests => tests}/test_pos.py | 12 +- {pyfolio/tests => tests}/test_round_trips.py | 12 +- {pyfolio/tests => tests}/test_tears.py | 91 +- {pyfolio/tests => tests}/test_timeseries.py | 57 +- {pyfolio/tests => tests}/test_txn.py | 4 +- tox.ini | 45 - versioneer.py | 1761 ----------------- 57 files changed, 556 insertions(+), 2902 deletions(-) create mode 100644 .flake8 delete mode 100644 pyfolio/_version.py delete mode 100644 pyfolio/interesting_periods.py delete mode 100644 setup.cfg delete mode 100755 setup.py rename {pyfolio => src/pyfolio}/__init__.py (67%) create mode 100644 src/pyfolio/_version.py rename {pyfolio => src/pyfolio}/capacity.py (94%) rename {pyfolio => src/pyfolio}/deprecate.py (100%) rename {pyfolio => src/pyfolio}/examples/results.pickle (100%) rename {pyfolio => src/pyfolio}/examples/round_trip_tear_sheet_example.ipynb (100%) rename {pyfolio => src/pyfolio}/examples/sector_mappings_example.ipynb (99%) rename {pyfolio => src/pyfolio}/examples/single_stock_example.ipynb (99%) rename {pyfolio => src/pyfolio}/examples/slippage_example.ipynb (100%) rename {pyfolio => src/pyfolio}/examples/zipline_algo_example.ipynb (99%) create mode 100644 src/pyfolio/interesting_periods.py rename {pyfolio => src/pyfolio}/ipycompat.py (100%) rename {pyfolio => src/pyfolio}/perf_attrib.py (97%) 
rename {pyfolio => src/pyfolio}/plotting.py (95%) rename {pyfolio => src/pyfolio}/pos.py (100%) rename {pyfolio => src/pyfolio}/round_trips.py (93%) rename {pyfolio => src/pyfolio}/tears.py (97%) rename {pyfolio => src/pyfolio}/timeseries.py (96%) rename {pyfolio => src/pyfolio}/txn.py (98%) rename {pyfolio => src/pyfolio}/utils.py (97%) rename {pyfolio/tests => tests}/__init__.py (100%) rename {pyfolio/tests => tests}/matplotlibrc (100%) rename {pyfolio/tests => tests}/test_capacity.py (87%) rename {pyfolio/tests => tests}/test_data/factor_loadings.csv (100%) rename {pyfolio/tests => tests}/test_data/factor_returns.csv (100%) rename {pyfolio/tests => tests}/test_data/intercepts.csv (100%) rename {pyfolio/tests => tests}/test_data/positions.csv (100%) rename {pyfolio/tests => tests}/test_data/residuals.csv (100%) rename {pyfolio/tests => tests}/test_data/returns.csv (100%) rename {pyfolio/tests => tests}/test_data/test_LMCAP.csv (100%) rename {pyfolio/tests => tests}/test_data/test_LT_MOMENTUM.csv (100%) rename {pyfolio/tests => tests}/test_data/test_MACDSignal.csv (100%) rename {pyfolio/tests => tests}/test_data/test_VLTY.csv (100%) rename {pyfolio/tests => tests}/test_data/test_caps.csv (100%) rename {pyfolio/tests => tests}/test_data/test_gross_lev.csv.gz (100%) rename {pyfolio/tests => tests}/test_data/test_pos.csv.gz (100%) rename {pyfolio/tests => tests}/test_data/test_returns.csv.gz (100%) rename {pyfolio/tests => tests}/test_data/test_sectors.csv (100%) rename {pyfolio/tests => tests}/test_data/test_shares_held.csv (100%) rename {pyfolio/tests => tests}/test_data/test_txn.csv.gz (100%) rename {pyfolio/tests => tests}/test_data/test_volumes.csv (100%) rename {pyfolio/tests => tests}/test_perf_attrib.py (88%) rename {pyfolio/tests => tests}/test_pos.py (96%) rename {pyfolio/tests => tests}/test_round_trips.py (96%) rename {pyfolio/tests => tests}/test_tears.py (59%) rename {pyfolio/tests => tests}/test_timeseries.py (89%) rename {pyfolio/tests => tests}/test_txn.py (96%) delete mode 100644 tox.ini delete mode 100644 versioneer.py diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..bc3f8b05 --- /dev/null +++ b/.flake8 @@ -0,0 +1,13 @@ +[flake8] +exclude = + .git, + .pytest_cache + conda, + _sources, + __pycache__, + docs/source/conf.py, + src/pyfolio/_version.py +max-line-length = 88 +max-complexity = 18 +select = B,C,E,F,W,T4,B9 +ignore = E203, E266, E501, W503, F403, F401, E231 diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 72182cae..54fa92b5 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -16,7 +16,7 @@ jobs: fail-fast: false matrix: os: [ ubuntu-latest , windows-latest, macos-latest ] - python-version: [ 3.7, 3.8, 3.9 ] + python-version: [ 3.7, 3.8, 3.9, '3.10', '3.11'] steps: - name: Checkout pyfolio uses: actions/checkout@v2 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1ce51389..8e08b81a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,6 +6,7 @@ repos: - id: check-merge-conflict - id: end-of-file-fixer - id: trailing-whitespace + - id: check-added-large-files - repo: https://gitlab.com/pycqa/flake8 rev: 3.9.1 @@ -13,7 +14,7 @@ repos: - id: flake8 - repo: https://github.com/psf/black - rev: 20.8b1 + rev: 22.3.0 hooks: - id: black diff --git a/pyfolio/_version.py b/pyfolio/_version.py deleted file mode 100644 index 1d86e9c0..00000000 --- a/pyfolio/_version.py +++ /dev/null @@ -1,489 +0,0 @@ -# This file helps to compute a version 
number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. Generated by -# versioneer-0.15 (https://github.com/warner/python-versioneer) - -import errno -import os -import re -import subprocess -import sys - - -def get_keywords(): - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). - git_refnames = "$Format:%d$" - git_full = "$Format:%H$" - keywords = {"refnames": git_refnames, "full": git_full} - return keywords - - -class VersioneerConfig: - pass - - -def get_config(): - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "pep440" - cfg.tag_prefix = "" - cfg.parentdir_prefix = "pyfolio-" - cfg.versionfile_source = "pyfolio/_version.py" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - pass - - -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - def decorate(f): - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen( - [c] + args, - cwd=cwd, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr else None), - ) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - return None - return stdout - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - # Source tarballs conventionally unpack into a directory that includes - # both the project name and a version string. - dirname = os.path.basename(root) - if not dirname.startswith(parentdir_prefix): - if verbose: - print( - "guessing rootdir is '%s', but '%s' doesn't start with " - "prefix '%s'" % (root, dirname, parentdir_prefix) - ) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - return { - "version": dirname[len(parentdir_prefix) :], - "full-revisionid": None, - "dirty": False, - "error": None, - } - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. 
- keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - if not keywords: - raise NotThisMethod("no keywords at all, weird") - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r"\d", r)]) - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix) :] - if verbose: - print("picking %s" % r) - return { - "version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, - "error": None, - } - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return { - "version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, - "error": "no suitable tags", - } - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - # this runs 'git' from the root of the source tree. This only gets called - # if the git-archive 'subst' keywords were *not* expanded, and - # _version.py hasn't already been rewritten with a short version string, - # meaning we're inside a checked out source tree. 
- - if not os.path.exists(os.path.join(root, ".git")): - if verbose: - print("no .git in %s" % root) - raise NotThisMethod("no .git directory") - - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - # if there is a tag, this yields TAG-NUM-gHEX[-dirty] - # if there are no tags, this yields HEX[-dirty] (no NUM) - describe_out = run_command( - GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root - ) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[: git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = ( - "unable to parse git-describe output: '%s'" % describe_out - ) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( - full_tag, - tag_prefix, - ) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix) :] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out = run_command( - GITS, ["rev-list", "HEAD", "--count"], cwd=root - ) - pieces["distance"] = int(count_out) # total number of commits - - return pieces - - -def plus_or_dot(pieces): - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - # now build up version string, with post-release "local version - # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - # exceptions: - # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - # TAG[.post.devDISTANCE] . No -dirty - - # exceptions: - # 1: no tags. 0.post.devDISTANCE - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. 
Note that - # .dev0 sorts backwards (a dirty tree will appear "older" than the - # corresponding clean one), but you shouldn't be releasing software with - # -dirty anyways. - - # exceptions: - # 1: no tags. 0.postDISTANCE[.dev0] - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_old(pieces): - # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. - - # exceptions: - # 1: no tags. 0.postDISTANCE[.dev0] - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty - # --always' - - # exceptions: - # 1: no tags. HEX[-dirty] (note: no 'g' prefix) - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty - # --always -long'. The distance/hash is unconditional. - - # exceptions: - # 1: no tags. HEX[-dirty] (note: no 'g' prefix) - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - if pieces["error"]: - return { - "version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - } - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return { - "version": rendered, - "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], - "error": None, - } - - -def get_versions(): - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. 
- - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords( - get_keywords(), cfg.tag_prefix, verbose - ) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. - for i in cfg.versionfile_source.split("/"): - root = os.path.dirname(root) - except NameError: - return { - "version": "0+unknown", - "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - } - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return { - "version": "0+unknown", - "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", - } diff --git a/pyfolio/interesting_periods.py b/pyfolio/interesting_periods.py deleted file mode 100644 index 7ffff742..00000000 --- a/pyfolio/interesting_periods.py +++ /dev/null @@ -1,75 +0,0 @@ -# -# Copyright 2016 Quantopian, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generates a list of historical event dates that may have had -significant impact on markets. See extract_interesting_date_ranges.""" - -import pandas as pd - -from collections import OrderedDict - -PERIODS = OrderedDict() -# Dotcom bubble -PERIODS["Dotcom"] = (pd.Timestamp("20000310"), pd.Timestamp("20000910")) - -# Lehmann Brothers -PERIODS["Lehman"] = (pd.Timestamp("20080801"), pd.Timestamp("20081001")) - -# 9/11 -PERIODS["9/11"] = (pd.Timestamp("20010911"), pd.Timestamp("20011011")) - -# 05/08/11 US down grade and European Debt Crisis 2011 -PERIODS["US downgrade/European Debt Crisis"] = ( - pd.Timestamp("20110805"), - pd.Timestamp("20110905"), -) - -# 16/03/11 Fukushima melt down 2011 -PERIODS["Fukushima"] = (pd.Timestamp("20110316"), pd.Timestamp("20110416")) - -# 01/08/03 US Housing Bubble 2003 -PERIODS["US Housing"] = (pd.Timestamp("20030108"), pd.Timestamp("20030208")) - -# 06/09/12 EZB IR Event 2012 -PERIODS["EZB IR Event"] = (pd.Timestamp("20120910"), pd.Timestamp("20121010")) - -# August 2007, March and September of 2008, Q1 & Q2 2009, -PERIODS["Aug07"] = (pd.Timestamp("20070801"), pd.Timestamp("20070901")) -PERIODS["Mar08"] = (pd.Timestamp("20080301"), pd.Timestamp("20080401")) -PERIODS["Sept08"] = (pd.Timestamp("20080901"), pd.Timestamp("20081001")) -PERIODS["2009Q1"] = (pd.Timestamp("20090101"), pd.Timestamp("20090301")) -PERIODS["2009Q2"] = (pd.Timestamp("20090301"), pd.Timestamp("20090601")) - -# Flash Crash (May 6, 2010 + 1 week post), -PERIODS["Flash Crash"] = (pd.Timestamp("20100505"), pd.Timestamp("20100510")) - -# April and October 2014). 
-PERIODS["Apr14"] = (pd.Timestamp("20140401"), pd.Timestamp("20140501")) -PERIODS["Oct14"] = (pd.Timestamp("20141001"), pd.Timestamp("20141101")) - -# Market down-turn in August/Sept 2015 -PERIODS["Fall2015"] = (pd.Timestamp("20150815"), pd.Timestamp("20150930")) - -# Market regimes -PERIODS["Low Volatility Bull Market"] = ( - pd.Timestamp("20050101"), - pd.Timestamp("20070801"), -) - -PERIODS["GFC Crash"] = (pd.Timestamp("20070801"), pd.Timestamp("20090401")) - -PERIODS["Recovery"] = (pd.Timestamp("20090401"), pd.Timestamp("20130101")) - -PERIODS["New Normal"] = (pd.Timestamp("20130101"), pd.Timestamp("today")) diff --git a/pyproject.toml b/pyproject.toml index c41db53e..bb18bdad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,17 +1,128 @@ [project] -requires-python = '>=3.7' +name = "pyfolio-reloaded" +description = "Performance and risk analysis of financial portfolios with Python" + +requires-python = '>=3.8' +dynamic = ["version"] +readme = "README.md" +authors = [ + { name = 'Quantopian Inc' }, + { email = 'pm@ml4trading.io' } +] +maintainers = [ + { name = 'Stefan Jansen' }, + { email = 'pm@ml4trading.io' } +] +license = { file = "LICENSE" } + +classifiers = [ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Natural Language :: English', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Operating System :: OS Independent', + 'Intended Audience :: Science/Research', + 'Topic :: Office/Business :: Financial :: Investment', + 'Topic :: Scientific/Engineering :: Information Analysis', +] + +dependencies = [ + "ipython >=3.2.3", + "matplotlib >=1.4.0", + "numpy >=1.11.1", + "pandas >=0.18.1", + "pytz >=2014.10", + "scipy >=0.14.0", + "scikit-learn >=0.16.1", + "seaborn >=0.7.1", + "empyrical-reloaded >=0.5.8", +] + +[project.urls] +homepage = 'https://ml4trading.io' +repository = 'https://github.com/stefan-jansen/pyfolio-reloaded' +documentation = 'https://pyfolio.ml4trading.io' [build-system] requires = [ 'setuptools>=54.0.0', + "setuptools_scm[toml]>=6.2", 'wheel>=0.31.0', 'oldest-supported-numpy; python_version>="3.7"', ] build-backend = 'setuptools.build_meta' +[project.optional-dependencies] +test = [ + "tox >=2.3.1", + "coverage >=4.0.3", + "coveralls ==3.0.1", + "pytest >=6.2", + 'pytest-xdist >=2.5.0', + "pytest-cov >=2.12", + "parameterized >=0.6.1", + "flake8 >=3.9.1", + "black", +] +dev = [ + "flake8 >=3.9.1", + "black", + "pre-commit >=2.12.1", +] +docs = [ + 'Cython', + 'Sphinx >=1.3.2', + 'numpydoc >=0.5.0', + 'sphinx-autobuild >=0.6.0', + 'pydata-sphinx-theme', + 'sphinx-markdown-tables', + "sphinx_copybutton", + 'm2r2' +] + +[tool.setuptools] +include-package-data = true +zip-safe = false + +[tool.setuptools.packages.find] +where = ['src'] +exclude = ['tests*'] + +[tool.setuptools_scm] +write_to = "src/pyfolio/_version.py" +version_scheme = 'guess-next-dev' +local_scheme = 'dirty-tag' + + +[tool.pytest] +minversion = "6.0" +testpaths = 'tests' +addopts = '-v' + + +[tool.cibuildwheel] +test-extras = "test" +test-command = "pytest -n 2 --reruns 5 {package}/tests" +build-verbosity = 3 + + +[tool.cibuildwheel.macos] +archs = ["x86_64", "arm64", "universal2"] +test-skip = ["*universal2:arm64"] + + +[tool.cibuildwheel.linux] +archs = ["auto64"] +skip = "*musllinux*" + + [tool.black] -target-version = ['py37', 'py38'] -line-length = 79 +line-length = 88 
+target-version = ['py38', 'py39', 'py310'] include = '\.pyi?$' extend-exclude = ''' \( @@ -19,9 +130,34 @@ extend-exclude = ''' \) ''' -[tool.pytest.ini_options] -minversion = "6.0" -addopts = "-q" -testpaths = [ - "pyfolio/tests" -] +[tool.tox] +legacy_tox_ini = """ +[tox] +envlist = py{38,39}-pandas12, py{38,39,310}-pandas{13,14,15}, py311-pandas15 +isolated_build = True +skip_missing_interpreters = True +minversion = 3.23.0 + +[gh-actions] +python = + 3.8: py38 + 3.9: py39 + 3.10: py310 + 3.11: py311 + +[testenv] +usedevelop = True +setenv = + MPLBACKEND = Agg + +changedir = tmp +extras = test +deps = + pandas12: pandas>=1.2.0,<1.3 + pandas13: pandas>=1.3.0,<1.4 + pandas14: pandas>=1.4.0,<1.5 + pandas15: pandas>=1.5.0,<1.6 + +commands = + pytest -n 4 --reruns 5 --cov={toxinidir}/src --cov-report term --cov-report=xml --cov-report=html:htmlcov {toxinidir}/tests +""" diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index af8f4d27..00000000 --- a/setup.cfg +++ /dev/null @@ -1,92 +0,0 @@ -[metadata] -name = pyfolio-reloaded -url = https://pyfolio.ml4trading.io -description = pyfolio is a Python library for performance and risk analysis of financial portfolios -long_description = file: README.md -long_description_content_type = text/markdown -maintainer = Applied AI, LLC -maintainer email = pm@ml4trading.io -author = Quantopian Inc -author email = opensource@quantopian.com -license = Apache License, Version 2.0 -license_file = LICENSE - -classifiers = - Development Status :: 4 - Beta - Programming Language :: Python - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.7 - Programming Language :: Python :: 3.8 - Programming Language :: Python :: 3.9 - License :: OSI Approved :: Apache Software License - Intended Audience :: Science/Research - Topic :: Scientific/Engineering - Topic :: Scientific/Engineering :: Mathematics - Operating System :: OS Independent - - -# See the docstring in versioneer.py for instructions. Note that you must -# re-run 'versioneer.py setup' after changing this section, and commit the -# resulting files. -[versioneer] -VCS = git -style = pep440 -versionfile_source = pyfolio/_version.py -versionfile_build = pyfolio/_version.py -tag_prefix = -parentdir_prefix = pyfolio- - -[nosetests] -verbosity = 2 -with-ignore-docstrings = 1 -with-timer = 1 -timer-top-n = 15 -with-coverage = 1 -cover-package = pyfolio.tests -with-doctest = 1 -logging-level = INFO - -[options] -python_requires = >=3.7 -include_package_data = True -install_requires = - ipython>=3.2.3 - matplotlib>=1.4.0 - numpy>=1.11.1 - pandas>=0.18.1 - pytz>=2014.10 - scipy>=0.14.0 - scikit-learn>=0.16.1 - seaborn>=0.7.1 - empyrical-reloaded>=0.5.8 - -[options.extras_require] -all = - %(doc)s - %(dev)s - %(test)s - -test = - tox>=2.3.1 - coverage>=4.0.3 - coveralls==3.0.1 - pytest>=6.2 - pytest-cov>=2.12 - parameterized>=0.6.1 - flake8>=3.9.1 - black - -dev = - flake8>=3.9.1 - black - pre-commit>=2.12.1 - -doc = - Cython - Sphinx>=1.3.2 - numpydoc>=0.5.0 - sphinx-autobuild>=0.6.0 - pydata-sphinx-theme - sphinx-markdown-tables - sphinx_copybutton - m2r2 diff --git a/setup.py b/setup.py deleted file mode 100755 index 4b8957b8..00000000 --- a/setup.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python -import sys -from pathlib import Path -from setuptools import setup - -# ensure the current directory is on sys.path -# so versioneer can be imported when pip uses -# PEP 517/518 build rules. 
-# https://github.com/python-versioneer/python-versioneer/issues/193 -sys.path.append(Path(__file__).resolve(strict=True).parent.as_posix()) -import versioneer # noqa: E402 - - -if __name__ == "__main__": - setup( - cmdclass=versioneer.get_cmdclass(), - version=versioneer.get_version(), - packages=["pyfolio", "pyfolio.tests"], - package_data={"pyfolio": ["data/*.*"]}, - test_suite="nose.collector", - ) diff --git a/pyfolio/__init__.py b/src/pyfolio/__init__.py similarity index 67% rename from pyfolio/__init__.py rename to src/pyfolio/__init__.py index ddbaba72..4785ee48 100644 --- a/pyfolio/__init__.py +++ b/src/pyfolio/__init__.py @@ -1,18 +1,20 @@ -from . import utils -from . import timeseries -from . import pos -from . import txn -from . import interesting_periods from . import capacity -from . import round_trips +from . import interesting_periods from . import perf_attrib - -from .tears import * # noqa +from . import pos +from . import round_trips +from . import timeseries +from . import txn +from . import utils from .plotting import * # noqa -from ._version import get_versions +from .tears import * # noqa -__version__ = get_versions()["version"] -del get_versions +try: + from ._version import version as __version__ + from ._version import version_tuple +except ImportError: + __version__ = "unknown version" + version_tuple = (0, 0, "unknown version") __all__ = [ "utils", diff --git a/src/pyfolio/_version.py b/src/pyfolio/_version.py new file mode 100644 index 00000000..0beaed4b --- /dev/null +++ b/src/pyfolio/_version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = "0.9.5.dev4+dirty" +__version_tuple__ = version_tuple = (0, 9, 5, "dev4", "dirty") diff --git a/pyfolio/capacity.py b/src/pyfolio/capacity.py similarity index 94% rename from pyfolio/capacity.py rename to src/pyfolio/capacity.py index 42b6e0b7..3e27a27d 100644 --- a/pyfolio/capacity.py +++ b/src/pyfolio/capacity.py @@ -39,9 +39,7 @@ def daily_txns_with_bar_data(transactions, market_data): txn_daily["price"] = market_data.xs("price", level=1).unstack() txn_daily["volume"] = market_data.xs("volume", level=1).unstack() - txn_daily = ( - txn_daily.reset_index().set_index("date").sort_index().asfreq("D") - ) + txn_daily = txn_daily.reset_index().set_index("date").sort_index().asfreq("D") return txn_daily @@ -91,9 +89,7 @@ def days_to_liquidate_positions( """ DV = market_data.xs("volume", level=1) * market_data.xs("price", level=1) - roll_mean_dv = ( - DV.rolling(window=mean_volume_window, center=False).mean().shift() - ) + roll_mean_dv = DV.rolling(window=mean_volume_window, center=False).mean().shift() roll_mean_dv = roll_mean_dv.replace(0, np.nan) positions_alloc = pos.get_percent_alloc(positions) @@ -175,9 +171,7 @@ def get_max_days_to_liquidate_by_ticker( return worst_liq -def get_low_liquidity_transactions( - transactions, market_data, last_n_days=None -): +def get_low_liquidity_transactions(transactions, market_data, last_n_days=None): """ For each traded name, find the daily transaction total that consumed the greatest proportion of available daily bar volume. 
@@ -204,9 +198,7 @@ def get_low_liquidity_transactions( txn_daily_w_bar = txn_daily_w_bar[txn_daily_w_bar.date > md] bar_consumption = txn_daily_w_bar.assign( - max_pct_bar_consumed=txn_daily_w_bar.amount.div( - txn_daily_w_bar.volume - ).mul(100) + max_pct_bar_consumed=txn_daily_w_bar.amount.div(txn_daily_w_bar.volume).mul(100) ).sort_values("max_pct_bar_consumed", ascending=False) max_bar_consumption = bar_consumption.groupby("symbol").first() @@ -252,9 +244,7 @@ def apply_slippage_penalty( simulate_traded_dollars = txn_daily.price * simulate_traded_shares simulate_pct_volume_used = simulate_traded_shares / txn_daily.volume - penalties = ( - simulate_pct_volume_used ** 2 * impact * simulate_traded_dollars - ) + penalties = simulate_pct_volume_used**2 * impact * simulate_traded_dollars daily_penalty = penalties.resample("D").sum() daily_penalty = daily_penalty.reindex(returns.index).fillna(0) @@ -264,8 +254,7 @@ def apply_slippage_penalty( # similarly. In other words, since we aren't applying compounding to # simulate_traded_shares, we shouldn't apply compounding to pv. portfolio_value = ( - ep.cum_returns(returns, starting_value=backtest_starting_capital) - * mult + ep.cum_returns(returns, starting_value=backtest_starting_capital) * mult ) adj_returns = returns - (daily_penalty / portfolio_value) diff --git a/pyfolio/deprecate.py b/src/pyfolio/deprecate.py similarity index 100% rename from pyfolio/deprecate.py rename to src/pyfolio/deprecate.py diff --git a/pyfolio/examples/results.pickle b/src/pyfolio/examples/results.pickle similarity index 100% rename from pyfolio/examples/results.pickle rename to src/pyfolio/examples/results.pickle diff --git a/pyfolio/examples/round_trip_tear_sheet_example.ipynb b/src/pyfolio/examples/round_trip_tear_sheet_example.ipynb similarity index 100% rename from pyfolio/examples/round_trip_tear_sheet_example.ipynb rename to src/pyfolio/examples/round_trip_tear_sheet_example.ipynb diff --git a/pyfolio/examples/sector_mappings_example.ipynb b/src/pyfolio/examples/sector_mappings_example.ipynb similarity index 99% rename from pyfolio/examples/sector_mappings_example.ipynb rename to src/pyfolio/examples/sector_mappings_example.ipynb index 52c7108d..e782fde0 100644 --- a/pyfolio/examples/sector_mappings_example.ipynb +++ b/src/pyfolio/examples/sector_mappings_example.ipynb @@ -961,4 +961,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} \ No newline at end of file +} diff --git a/pyfolio/examples/single_stock_example.ipynb b/src/pyfolio/examples/single_stock_example.ipynb similarity index 99% rename from pyfolio/examples/single_stock_example.ipynb rename to src/pyfolio/examples/single_stock_example.ipynb index d04b0fb8..6a41005d 100644 --- a/pyfolio/examples/single_stock_example.ipynb +++ b/src/pyfolio/examples/single_stock_example.ipynb @@ -383,4 +383,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} \ No newline at end of file +} diff --git a/pyfolio/examples/slippage_example.ipynb b/src/pyfolio/examples/slippage_example.ipynb similarity index 100% rename from pyfolio/examples/slippage_example.ipynb rename to src/pyfolio/examples/slippage_example.ipynb diff --git a/pyfolio/examples/zipline_algo_example.ipynb b/src/pyfolio/examples/zipline_algo_example.ipynb similarity index 99% rename from pyfolio/examples/zipline_algo_example.ipynb rename to src/pyfolio/examples/zipline_algo_example.ipynb index 69524e91..540bfc95 100644 --- a/pyfolio/examples/zipline_algo_example.ipynb +++ b/src/pyfolio/examples/zipline_algo_example.ipynb @@ -1933,4 +1933,4 @@ }, "nbformat": 4, 
"nbformat_minor": 1 -} \ No newline at end of file +} diff --git a/src/pyfolio/interesting_periods.py b/src/pyfolio/interesting_periods.py new file mode 100644 index 00000000..5a3cb3d1 --- /dev/null +++ b/src/pyfolio/interesting_periods.py @@ -0,0 +1,135 @@ +# +# Copyright 2016 Quantopian, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generates a list of historical event dates that may have had +significant impact on markets. See extract_interesting_date_ranges.""" + +import datetime as dt +from collections import OrderedDict + +import pandas as pd + +PERIODS = OrderedDict() +# Dotcom bubble +PERIODS["Dotcom"] = ( + pd.Timestamp("20000310", tzinfo=dt.timezone.utc), + pd.Timestamp("20000910", tzinfo=dt.timezone.utc), +) + +# Lehmann Brothers +PERIODS["Lehman"] = ( + pd.Timestamp("20080801", tzinfo=dt.timezone.utc), + pd.Timestamp("20081001", tzinfo=dt.timezone.utc), +) + +# 9/11 +PERIODS["9/11"] = ( + pd.Timestamp("20010911", tzinfo=dt.timezone.utc), + pd.Timestamp("20011011", tzinfo=dt.timezone.utc), +) + +# 05/08/11 US down grade and European Debt Crisis 2011 +PERIODS["US downgrade/European Debt Crisis"] = ( + pd.Timestamp("20110805", tzinfo=dt.timezone.utc), + pd.Timestamp("20110905", tzinfo=dt.timezone.utc), +) + +# 16/03/11 Fukushima melt down 2011 +PERIODS["Fukushima"] = ( + pd.Timestamp("20110316", tzinfo=dt.timezone.utc), + pd.Timestamp("20110416", tzinfo=dt.timezone.utc), +) + +# 01/08/03 US Housing Bubble 2003 +PERIODS["US Housing"] = ( + pd.Timestamp("20030108", tzinfo=dt.timezone.utc), + pd.Timestamp("20030208", tzinfo=dt.timezone.utc), +) + +# 06/09/12 EZB IR Event 2012 +PERIODS["EZB IR Event"] = ( + pd.Timestamp("20120910", tzinfo=dt.timezone.utc), + pd.Timestamp("20121010", tzinfo=dt.timezone.utc), +) + +# August 2007, March and September of 2008, Q1 & Q2 2009, +PERIODS["Aug07"] = ( + pd.Timestamp("20070801", tzinfo=dt.timezone.utc), + pd.Timestamp("20070901", tzinfo=dt.timezone.utc), +) +PERIODS["Mar08"] = ( + pd.Timestamp("20080301", tzinfo=dt.timezone.utc), + pd.Timestamp("20080401", tzinfo=dt.timezone.utc), +) +PERIODS["Sept08"] = ( + pd.Timestamp("20080901", tzinfo=dt.timezone.utc), + pd.Timestamp("20081001", tzinfo=dt.timezone.utc), +) +PERIODS["2009Q1"] = ( + pd.Timestamp("20090101", tzinfo=dt.timezone.utc), + pd.Timestamp("20090301", tzinfo=dt.timezone.utc), +) +PERIODS["2009Q2"] = ( + pd.Timestamp("20090301", tzinfo=dt.timezone.utc), + pd.Timestamp("20090601", tzinfo=dt.timezone.utc), +) + +# Flash Crash (May 6, 2010 + 1 week post), +PERIODS["Flash Crash"] = ( + pd.Timestamp("20100505", tzinfo=dt.timezone.utc), + pd.Timestamp("20100510", tzinfo=dt.timezone.utc), +) + +# April and October 2014). 
+PERIODS["Apr14"] = ( + pd.Timestamp("20140401", tzinfo=dt.timezone.utc), + pd.Timestamp("20140501", tzinfo=dt.timezone.utc), +) +PERIODS["Oct14"] = ( + pd.Timestamp("20141001", tzinfo=dt.timezone.utc), + pd.Timestamp("20141101", tzinfo=dt.timezone.utc), +) + +# Market down-turn in August/Sept 2015 +PERIODS["Fall2015"] = ( + pd.Timestamp("20150815", tzinfo=dt.timezone.utc), + pd.Timestamp("20150930", tzinfo=dt.timezone.utc), +) + +# Market regimes +PERIODS["Low Volatility Bull Market"] = ( + pd.Timestamp("20050101", tzinfo=dt.timezone.utc), + pd.Timestamp("20070801", tzinfo=dt.timezone.utc), +) + +PERIODS["GFC Crash"] = ( + pd.Timestamp("20070801", tzinfo=dt.timezone.utc), + pd.Timestamp("20090401", tzinfo=dt.timezone.utc), +) + +PERIODS["Recovery"] = ( + pd.Timestamp("20090401", tzinfo=dt.timezone.utc), + pd.Timestamp("20130101", tzinfo=dt.timezone.utc), +) + +PERIODS["New Normal"] = ( + pd.Timestamp("20130101", tzinfo=dt.timezone.utc), + pd.Timestamp("20180921", tzinfo=dt.timezone.utc), +) + +PERIODS["Covid"] = ( + pd.Timestamp("20200211", tzinfo=dt.timezone.utc), + pd.Timestamp("today", tzinfo=dt.timezone.utc), +) diff --git a/pyfolio/ipycompat.py b/src/pyfolio/ipycompat.py similarity index 100% rename from pyfolio/ipycompat.py rename to src/pyfolio/ipycompat.py diff --git a/pyfolio/perf_attrib.py b/src/pyfolio/perf_attrib.py similarity index 97% rename from pyfolio/perf_attrib.py rename to src/pyfolio/perf_attrib.py index 72f49bd3..21f2687b 100644 --- a/pyfolio/perf_attrib.py +++ b/src/pyfolio/perf_attrib.py @@ -233,9 +233,7 @@ def create_perf_attrib_stats(perf_attrib, risk_exposures): summary["Specific Sharpe Ratio"] = ep.sharpe_ratio(specific_returns) - summary["Cumulative Specific Return"] = ep.cum_returns_final( - specific_returns - ) + summary["Cumulative Specific Return"] = ep.cum_returns_final(specific_returns) summary["Cumulative Common Return"] = ep.cum_returns_final(common_returns) summary["Total Returns"] = ep.cum_returns_final(total_returns) @@ -364,18 +362,14 @@ def plot_returns(perf_attrib_data, cost=None, ax=None): returns = perf_attrib_data["total_returns"] total_returns_label = "Total returns" - cumulative_returns_less_costs = _cumulative_returns_less_costs( - returns, cost - ) + cumulative_returns_less_costs = _cumulative_returns_less_costs(returns, cost) if cost is not None: total_returns_label += " (adjusted)" specific_returns = perf_attrib_data["specific_returns"] common_returns = perf_attrib_data["common_returns"] - ax.plot( - cumulative_returns_less_costs, color="b", label=total_returns_label - ) + ax.plot(cumulative_returns_less_costs, color="b", label=total_returns_label) ax.plot( ep.cum_returns(specific_returns), color="g", @@ -388,9 +382,7 @@ def plot_returns(perf_attrib_data, cost=None, ax=None): ) if cost is not None: - ax.plot( - -ep.cum_returns(cost), color="k", label="Cumulative cost spent" - ) + ax.plot(-ep.cum_returns(cost), color="k", label="Cumulative cost spent") ax.set_title("Time series of cumulative returns") ax.set_ylabel("Returns") @@ -484,9 +476,7 @@ def plot_factor_contribution_to_perf( return ax -def plot_risk_exposures( - exposures, ax=None, title="Daily risk factor exposures" -): +def plot_risk_exposures(exposures, ax=None, title="Daily risk factor exposures"): """ Parameters ---------- @@ -585,9 +575,7 @@ def _align_and_warn( warnings.warn(missing_stocks_warning_msg) - positions = positions.drop( - missing_stocks, axis="columns", errors="ignore" - ) + positions = positions.drop(missing_stocks, axis="columns", errors="ignore") 
missing_factor_loadings_index = positions.index.difference( factor_loadings.index.get_level_values(0).unique() @@ -612,9 +600,7 @@ def _align_and_warn( warnings.warn(warning_msg) - positions = positions.drop( - missing_factor_loadings_index, errors="ignore" - ) + positions = positions.drop(missing_factor_loadings_index, errors="ignore") returns = returns.drop(missing_factor_loadings_index, errors="ignore") factor_returns = factor_returns.drop( missing_factor_loadings_index, errors="ignore" diff --git a/pyfolio/plotting.py b/src/pyfolio/plotting.py similarity index 95% rename from pyfolio/plotting.py rename to src/pyfolio/plotting.py index 93da60a9..be01c85a 100644 --- a/pyfolio/plotting.py +++ b/src/pyfolio/plotting.py @@ -444,14 +444,10 @@ def plot_drawdown_periods(returns, top=10, ax=None, **kwargs): lim = ax.get_ylim() colors = sns.cubehelix_palette(len(df_drawdowns))[::-1] - for i, (peak, recovery) in df_drawdowns[ - ["Peak date", "Recovery date"] - ].iterrows(): + for i, (peak, recovery) in df_drawdowns[["Peak date", "Recovery date"]].iterrows(): if pd.isnull(recovery): recovery = returns.index[-1] - ax.fill_between( - (peak, recovery), lim[0], lim[1], alpha=0.4, color=colors[i] - ) + ax.fill_between((peak, recovery), lim[0], lim[1], alpha=0.4, color=colors[i]) ax.set_ylim(lim) ax.set_title("Top %i drawdown periods" % top) ax.set_ylabel("Cumulative returns") @@ -623,12 +619,8 @@ def show_perf_stats( positions_is = positions[positions.index < live_start_date] positions_oos = positions[positions.index >= live_start_date] if transactions is not None: - transactions_is = transactions[ - (transactions.index < live_start_date) - ] - transactions_oos = transactions[ - (transactions.index > live_start_date) - ] + transactions_is = transactions[(transactions.index < live_start_date)] + transactions_oos = transactions[(transactions.index > live_start_date)] perf_stats_is = perf_func( returns_is, @@ -665,17 +657,13 @@ def show_perf_stats( ) else: if len(returns.index) > 0: - date_rows["Total months"] = int( - len(returns) / APPROX_BDAYS_PER_MONTH - ) + date_rows["Total months"] = int(len(returns) / APPROX_BDAYS_PER_MONTH) perf_stats = pd.DataFrame(perf_stats_all, columns=["Backtest"]) for column in perf_stats.columns: - for stat, value in perf_stats[column].iteritems(): + for stat, value in perf_stats[column].items(): if stat in STAT_FUNCS_PCT: - perf_stats.loc[stat, column] = ( - str(np.round(value * 100, 3)) + "%" - ) + perf_stats.loc[stat, column] = str(np.round(value * 100, 3)) + "%" if header_rows is None: header_rows = date_rows else: @@ -806,9 +794,7 @@ def cone(in_sample_returns (pd.Series), ax.set_yscale("log" if logy else "linear") if volatility_match and factor_returns is None: - raise ValueError( - "volatility_match requires passing of factor_returns." 
- ) + raise ValueError("volatility_match requires passing of factor_returns.") elif volatility_match and factor_returns is not None: bmark_vol = factor_returns.loc[returns.index].std() returns = (returns / returns.std()) * bmark_vol @@ -819,9 +805,7 @@ def cone(in_sample_returns (pd.Series), ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter)) if factor_returns is not None: - cum_factor_returns = ep.cum_returns( - factor_returns[cum_rets.index], 1.0 - ) + cum_factor_returns = ep.cum_returns(factor_returns[cum_rets.index], 1.0) cum_factor_returns.plot( lw=2, color="gray", @@ -837,7 +821,7 @@ def cone(in_sample_returns (pd.Series), oos_cum_returns = cum_rets.loc[cum_rets.index >= live_start_date] else: is_cum_returns = cum_rets - oos_cum_returns = pd.Series([]) + oos_cum_returns = pd.Series([], dtype="float64") is_cum_returns.plot( lw=2, color="forestgreen", alpha=0.6, label="Backtest", ax=ax, **kwargs @@ -877,9 +861,7 @@ def cone(in_sample_returns (pd.Series), return ax -def plot_rolling_beta( - returns, factor_returns, legend_loc="best", ax=None, **kwargs -): +def plot_rolling_beta(returns, factor_returns, legend_loc="best", ax=None, **kwargs): """ Plots the rolling 6-month and 12-month beta versus date. @@ -983,9 +965,7 @@ def plot_rolling_volatility( rolling_vol_ts_factor = timeseries.rolling_volatility( factor_returns, rolling_window ) - rolling_vol_ts_factor.plot( - alpha=0.7, lw=2, color="grey", ax=ax, **kwargs - ) + rolling_vol_ts_factor.plot(alpha=0.7, lw=2, color="grey", ax=ax, **kwargs) ax.set_title("Rolling volatility (6-month)") ax.axhline(rolling_vol_ts.mean(), color="steelblue", linestyle="--", lw=2) @@ -1060,22 +1040,16 @@ def plot_rolling_sharpe( rolling_sharpe_ts_factor = timeseries.rolling_sharpe( factor_returns, rolling_window ) - rolling_sharpe_ts_factor.plot( - alpha=0.7, lw=2, color="grey", ax=ax, **kwargs - ) + rolling_sharpe_ts_factor.plot(alpha=0.7, lw=2, color="grey", ax=ax, **kwargs) ax.set_title("Rolling Sharpe ratio (6-month)") - ax.axhline( - rolling_sharpe_ts.mean(), color="steelblue", linestyle="--", lw=2 - ) + ax.axhline(rolling_sharpe_ts.mean(), color="steelblue", linestyle="--", lw=2) ax.axhline(0.0, color="black", linestyle="--", lw=1, zorder=2) ax.set_ylabel("Sharpe ratio") ax.set_xlabel("") if factor_returns is None: - ax.legend( - ["Sharpe", "Average"], loc=legend_loc, frameon=True, framealpha=0.5 - ) + ax.legend(["Sharpe", "Average"], loc=legend_loc, frameon=True, framealpha=0.5) else: ax.legend( ["Sharpe", "Benchmark Sharpe", "Average"], @@ -1160,9 +1134,7 @@ def plot_exposures(returns, positions, ax=None, **kwargs): ax.fill_between( l_exp.index, 0, l_exp.values, label="Long", color="green", alpha=0.5 ) - ax.fill_between( - s_exp.index, 0, s_exp.values, label="Short", color="red", alpha=0.5 - ) + ax.fill_between(s_exp.index, 0, s_exp.values, label="Short", color="red", alpha=0.5) ax.plot( net_exp.index, net_exp.values, @@ -1221,9 +1193,7 @@ def show_and_plot_top_positions( positions_alloc = positions_alloc.copy() positions_alloc.columns = positions_alloc.columns.map(utils.format_asset) - df_top_long, df_top_short, df_top_abs = pos.get_top_long_short_abs( - positions_alloc - ) + df_top_long, df_top_short, df_top_abs = pos.get_top_long_short_abs(positions_alloc) if show_and_plot == 1 or show_and_plot == 2: utils.print_table( @@ -1345,14 +1315,10 @@ def plot_sector_allocations(returns, sector_alloc, ax=None, **kwargs): if ax is None: ax = plt.gca() - sector_alloc.plot( - title="Sector allocation over time", alpha=0.5, ax=ax, **kwargs - ) + 
sector_alloc.plot(title="Sector allocation over time", alpha=0.5, ax=ax, **kwargs) box = ax.get_position() - ax.set_position( - [box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9] - ) + ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9]) # Put a legend below current axis ax.legend( @@ -1419,7 +1385,7 @@ def plot_return_quantiles(returns, live_start_date=None, ax=None, **kwargs): sns.swarmplot( data=[oos_returns, oos_weekly, oos_monthly], ax=ax, - color="red", + palette="dark:red", marker="d", **kwargs, ) @@ -1492,12 +1458,8 @@ def plot_turnover( df_turnover = txn.get_turnover(positions, transactions, turnover_denom) df_turnover_by_month = df_turnover.resample("M").mean() df_turnover.plot(color="steelblue", alpha=1.0, lw=0.5, ax=ax, **kwargs) - df_turnover_by_month.plot( - color="orangered", alpha=0.5, lw=2, ax=ax, **kwargs - ) - ax.axhline( - df_turnover.mean(), color="steelblue", linestyle="--", lw=3, alpha=1.0 - ) + df_turnover_by_month.plot(color="orangered", alpha=0.5, lw=2, ax=ax, **kwargs) + ax.axhline(df_turnover.mean(), color="steelblue", linestyle="--", lw=3, alpha=1.0) ax.legend( [ "Daily turnover", @@ -1573,9 +1535,7 @@ def plot_slippage_sweep( return ax -def plot_slippage_sensitivity( - returns, positions, transactions, ax=None, **kwargs -): +def plot_slippage_sensitivity(returns, positions, transactions, ax=None, **kwargs): """ Plots curve relating per-dollar slippage to average annual returns. @@ -1604,7 +1564,7 @@ def plot_slippage_sensitivity( if ax is None: ax = plt.gca() - avg_returns_given_slippage = pd.Series() + avg_returns_given_slippage = pd.Series(dtype="float64") for bps in range(1, 100): adj_returns = txn.adjust_returns_for_slippage( returns, positions, transactions, bps @@ -1632,9 +1592,7 @@ def plot_capacity_sweep( step_size=1000000, ax=None, ): - txn_daily_w_bar = capacity.daily_txns_with_bar_data( - transactions, market_data - ) + txn_daily_w_bar = capacity.daily_txns_with_bar_data(transactions, market_data) captial_base_sweep = pd.Series() for start_pv in range(min_pv, max_pv, step_size): @@ -1775,9 +1733,11 @@ def plot_txn_time_hist( txn_time.index = txn_time.index.tz_convert(pytz.timezone(tz)) txn_time.index = txn_time.index.map(lambda x: x.hour * 60 + x.minute) txn_time["trade_value"] = (txn_time.amount * txn_time.price).abs() - txn_time = txn_time.groupby(level=0).sum().reindex(index=range(570, 961)) + txn_time = ( + txn_time.groupby(level=0).sum(numeric_only=True).reindex(index=range(570, 961)) + ) txn_time.index = (txn_time.index / bin_minutes).astype(int) * bin_minutes - txn_time = txn_time.groupby(level=0).sum() + txn_time = txn_time.groupby(level=0).sum(numeric_only=True) txn_time["time_str"] = txn_time.index.map( lambda x: str(datetime.time(int(x / 60), x % 60))[:-3] @@ -1925,9 +1885,7 @@ def plot_round_trip_lifetimes(round_trips, disp_amount=16, lsize=18, ax=None): ax.set_ylim((-0.5, min(len(sample), disp_amount) - 0.5)) blue = patches.Rectangle([0, 0], 1, 1, color="b", label="Long") red = patches.Rectangle([0, 0], 1, 1, color="r", label="Short") - leg = ax.legend( - handles=[blue, red], loc="lower left", frameon=True, framealpha=0.5 - ) + leg = ax.legend(handles=[blue, red], loc="lower left", frameon=True, framealpha=0.5) leg.get_frame().set_edgecolor("black") ax.grid(False) @@ -1991,9 +1949,7 @@ def plot_prob_profit_trade(round_trips, ax=None): round_trips["profitable"] = round_trips.pnl > 0 - dist = sp.stats.beta( - round_trips.profitable.sum(), (~round_trips.profitable).sum() - ) + dist = 
sp.stats.beta(round_trips.profitable.sum(), (~round_trips.profitable).sum()) y = dist.pdf(x) lower_perc = dist.ppf(0.025) upper_perc = dist.ppf(0.975) @@ -2106,9 +2062,7 @@ def plot_cones( # Plot returns line graph label = "Cumulative returns = {:.2f}%".format((returns.iloc[-1] - 1) * 100) - axes.plot( - returns.index, returns.values, color="black", lw=2.0, label=label - ) + axes.plot(returns.index, returns.values, color="black", lw=2.0, label=label) if name is not None: axes.set_title(name) diff --git a/pyfolio/pos.py b/src/pyfolio/pos.py similarity index 100% rename from pyfolio/pos.py rename to src/pyfolio/pos.py diff --git a/pyfolio/round_trips.py b/src/pyfolio/round_trips.py similarity index 93% rename from pyfolio/round_trips.py rename to src/pyfolio/round_trips.py index eee9d857..88e8e043 100644 --- a/pyfolio/round_trips.py +++ b/src/pyfolio/round_trips.py @@ -12,12 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from math import copysign import warnings from collections import deque, OrderedDict +from math import copysign -import pandas as pd import numpy as np +import pandas as pd from .utils import print_table, format_asset @@ -121,9 +121,7 @@ def vwap(transaction): if transaction.amount.sum() == 0: warnings.warn("Zero transacted shares, setting vwap to nan.") return np.nan - return ( - transaction.amount * transaction.price - ).sum() / transaction.amount.sum() + return (transaction.amount * transaction.price).sum() / transaction.amount.sum() out = [] for _, t in txn.groupby("symbol"): @@ -132,12 +130,8 @@ def vwap(transaction): t = t.reset_index() t["order_sign"] = t.amount > 0 - t["block_dir"] = ( - (t.order_sign.shift(1) != t.order_sign).astype(int).cumsum() - ) - t["block_time"] = ( - ((t.dt.sub(t.dt.shift(1))) > max_delta).astype(int).cumsum() - ) + t["block_dir"] = (t.order_sign.shift(1) != t.order_sign).astype(int).cumsum() + t["block_time"] = ((t.dt.sub(t.dt.shift(1))) > max_delta).astype(int).cumsum() grouped_price = t.groupby(["block_dir", "block_time"]).apply(vwap) grouped_price.name = "price" grouped_rest = t.groupby(["block_dir", "block_time"]).agg( @@ -213,9 +207,7 @@ def extract_round_trips(transactions, portfolio_value=None): trans_sym["abs_amount"] = trans_sym.amount.abs().astype(int) for dt, t in trans_sym.iterrows(): if t.price < 0: - warnings.warn( - "Negative price detected, ignoring for" "round-trip." - ) + warnings.warn("Negative price detected, ignoring for" "round-trip.") continue indiv_prices = [t.signed_price] * t.abs_amount @@ -313,7 +305,7 @@ def add_closing_transactions(positions, transactions): # they don't conflict with other round_trips executed at that time. 
end_dt = open_pos.name + pd.Timedelta(seconds=1) - for sym, ending_val in open_pos.iteritems(): + for sym, ending_val in open_pos.items(): txn_sym = transactions[transactions.symbol == sym] ending_amount = txn_sym.amount.sum() @@ -328,7 +320,7 @@ def add_closing_transactions(positions, transactions): ) closing_txn = pd.DataFrame(closing_txn, index=[end_dt]) - closed_txns = closed_txns.append(closing_txn) + closed_txns = pd.concat([closed_txns, closing_txn]) closed_txns = closed_txns[closed_txns.amount != 0] @@ -386,14 +378,10 @@ def gen_round_trip_stats(round_trips): stats = {} stats["pnl"] = agg_all_long_short(round_trips, "pnl", PNL_STATS) stats["summary"] = agg_all_long_short(round_trips, "pnl", SUMMARY_STATS) - stats["duration"] = agg_all_long_short( - round_trips, "duration", DURATION_STATS - ) + stats["duration"] = agg_all_long_short(round_trips, "duration", DURATION_STATS) stats["returns"] = agg_all_long_short(round_trips, "returns", RETURN_STATS) - stats["symbols"] = ( - round_trips.groupby("symbol")["returns"].agg(RETURN_STATS).T - ) + stats["symbols"] = round_trips.groupby("symbol")["returns"].agg(RETURN_STATS).T return stats @@ -415,13 +403,9 @@ def print_round_trip_stats(round_trips, hide_pos=False): stats = gen_round_trip_stats(round_trips) - print_table( - stats["summary"], float_format="{:.2f}".format, name="Summary stats" - ) + print_table(stats["summary"], float_format="{:.2f}".format, name="Summary stats") print_table(stats["pnl"], float_format="${:.2f}".format, name="PnL stats") - print_table( - stats["duration"], float_format="{:.2f}".format, name="Duration stats" - ) + print_table(stats["duration"], float_format="{:.2f}".format, name="Duration stats") print_table( stats["returns"] * 100, float_format="{:.2f}%".format, diff --git a/pyfolio/tears.py b/src/pyfolio/tears.py similarity index 97% rename from pyfolio/tears.py rename to src/pyfolio/tears.py index 715c8b26..ea203640 100644 --- a/pyfolio/tears.py +++ b/src/pyfolio/tears.py @@ -16,12 +16,12 @@ from time import time import empyrical as ep -from IPython.display import display, Markdown import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt import pandas as pd - import seaborn as sns +from IPython.display import display, Markdown + from . import capacity from . import perf_attrib from . 
import plotting @@ -554,9 +554,7 @@ def create_returns_tear_sheet( ax_rolling_returns = plt.subplot(gs[:2, :]) i = 2 - ax_rolling_returns_vol_match = plt.subplot( - gs[i, :], sharex=ax_rolling_returns - ) + ax_rolling_returns_vol_match = plt.subplot(gs[i, :], sharex=ax_rolling_returns) i += 1 ax_rolling_returns_log = plt.subplot(gs[i, :], sharex=ax_rolling_returns) i += 1 @@ -735,9 +733,7 @@ def create_position_tear_sheet( ax=ax_top_positions, ) - plotting.plot_max_median_position_concentration( - positions, ax=ax_max_median_pos - ) + plotting.plot_max_median_position_concentration(positions, ax=ax_max_median_pos) plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings) @@ -753,9 +749,7 @@ def create_position_tear_sheet( sector_alloc = pos.get_percent_alloc(sector_exposures) sector_alloc = sector_alloc.drop("cash", axis="columns") ax_sector_alloc = plt.subplot(gs[6, :], sharex=ax_exposures) - plotting.plot_sector_allocations( - returns, sector_alloc, ax=ax_sector_alloc - ) + plotting.plot_sector_allocations(returns, sector_alloc, ax=ax_sector_alloc) for ax in fig.axes: ax.tick_params( @@ -912,9 +906,7 @@ def create_round_trip_tear_sheet( estimate_intraday, returns, positions, transactions ) - transactions_closed = round_trips.add_closing_transactions( - positions, transactions - ) + transactions_closed = round_trips.add_closing_transactions(positions, transactions) # extract_round_trips requires BoD portfolio_value trades = round_trips.extract_round_trips( transactions_closed, @@ -1006,9 +998,7 @@ def create_interesting_times_tear_sheet( If True, returns the figure that was plotted on. """ - rets_interesting = timeseries.extract_interesting_date_ranges( - returns, periods - ) + rets_interesting = timeseries.extract_interesting_date_ranges(returns, periods) if not rets_interesting: warnings.warn( @@ -1158,9 +1148,7 @@ def create_capacity_tear_sheet( mean_volume_window=5, last_n_days=last_n_days, ) - max_days_by_ticker_lnd.index = max_days_by_ticker_lnd.index.map( - utils.format_asset - ) + max_days_by_ticker_lnd.index = max_days_by_ticker_lnd.index.map(utils.format_asset) print("Last {} trading days:".format(last_n_days)) utils.print_table( @@ -1174,18 +1162,14 @@ def create_capacity_tear_sheet( "Tickers with daily transactions consuming >{}% of daily bar \n" "all backtest:".format(trade_daily_vol_limit * 100) ) - utils.print_table( - llt[llt["max_pct_bar_consumed"] > trade_daily_vol_limit * 100] - ) + utils.print_table(llt[llt["max_pct_bar_consumed"] > trade_daily_vol_limit * 100]) llt = capacity.get_low_liquidity_transactions( transactions, market_data, last_n_days=last_n_days ) print("Last {} trading days:".format(last_n_days)) - utils.print_table( - llt[llt["max_pct_bar_consumed"] > trade_daily_vol_limit * 100] - ) + utils.print_table(llt[llt["max_pct_bar_consumed"] > trade_daily_vol_limit * 100]) bt_starting_capital = positions.iloc[0].sum() / (1 + returns.iloc[0]) fig, ax_capacity_sweep = plt.subplots(figsize=(14, 6)) @@ -1288,33 +1272,23 @@ def create_perf_attrib_tear_sheet( gs = gridspec.GridSpec(vertical_sections, 1, wspace=0.5, hspace=0.5) - perf_attrib.plot_returns( - perf_attrib_data, ax=plt.subplot(gs[current_section]) - ) + perf_attrib.plot_returns(perf_attrib_data, ax=plt.subplot(gs[current_section])) current_section += 1 if factor_partitions is not None: for factor_type, partitions in factor_partitions.items(): - - columns_to_select = perf_attrib_data.columns.intersection( - partitions - ) + columns_to_select = perf_attrib_data.columns.intersection(partitions) 
perf_attrib.plot_factor_contribution_to_perf( perf_attrib_data[columns_to_select], ax=plt.subplot(gs[current_section]), - title=("Cumulative common {} returns attribution").format( - factor_type - ), + title=("Cumulative common {} returns attribution").format(factor_type), ) current_section += 1 for factor_type, partitions in factor_partitions.items(): - - columns_to_select = portfolio_exposures.columns.intersection( - partitions - ) + columns_to_select = portfolio_exposures.columns.intersection(partitions) perf_attrib.plot_risk_exposures( portfolio_exposures[columns_to_select], diff --git a/pyfolio/timeseries.py b/src/pyfolio/timeseries.py similarity index 96% rename from pyfolio/timeseries.py rename to src/pyfolio/timeseries.py index 8e7d37b7..66220c88 100644 --- a/pyfolio/timeseries.py +++ b/src/pyfolio/timeseries.py @@ -252,9 +252,7 @@ def downside_risk(returns, required_return=0, period=DAILY): Annualized downside deviation """ - return ep.downside_risk( - returns, required_return=required_return, period=period - ) + return ep.downside_risk(returns, required_return=required_return, period=period) @deprecated(msg=DEPRECATION_WARNING) @@ -502,9 +500,7 @@ def aggregate_returns(returns, convert_to): return ep.aggregate_returns(returns, convert_to=convert_to) -def rolling_beta( - returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6 -): +def rolling_beta(returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6): """ Determines the rolling beta of a strategy. @@ -538,13 +534,11 @@ def rolling_beta( partial(rolling_beta, returns), rolling_window=rolling_window ) else: - out = pd.Series(index=returns.index) + out = pd.Series(index=returns.index, dtype="float64") for beg, end in zip( returns.index[0:-rolling_window], returns.index[rolling_window:] ): - out.loc[end] = ep.beta( - returns.loc[beg:end], factor_returns.loc[beg:end] - ) + out.loc[end] = ep.beta(returns.loc[beg:end], factor_returns.loc[beg:end]) return out @@ -731,7 +725,7 @@ def perf_stats( Performance metrics. """ - stats = pd.Series() + stats = pd.Series(dtype="float64") for stat_func in SIMPLE_STAT_FUNCS: stats[STAT_FUNC_NAMES[stat_func.__name__]] = stat_func(returns) @@ -749,9 +743,7 @@ def perf_stats( return stats -def perf_stats_bootstrap( - returns, factor_returns=None, return_stats=True, **kwargs -): +def perf_stats_bootstrap(returns, factor_returns=None, return_stats=True, **kwargs): """Calculates various bootstrapped performance metrics of a strategy. 
Parameters @@ -931,7 +923,7 @@ def get_max_drawdown(returns): """ returns = returns.copy() - df_cum = cum_returns(returns, 1.0) + df_cum = ep.cum_returns(returns, 1.0) running_max = np.maximum.accumulate(df_cum) underwater = df_cum / running_max - 1 return get_max_drawdown_underwater(underwater) @@ -965,19 +957,13 @@ def get_top_drawdowns(returns, top=10): peak, valley, recovery = get_max_drawdown_underwater(underwater) # Slice out draw-down period if not pd.isnull(recovery): - underwater.drop( - underwater[peak:recovery].index[1:-1], inplace=True - ) + underwater.drop(underwater[peak:recovery].index[1:-1], inplace=True) else: # drawdown has not ended yet underwater = underwater.loc[:peak] drawdowns.append((peak, valley, recovery)) - if ( - (len(returns) == 0) - or (len(underwater) == 0) - or (np.min(underwater) == 0) - ): + if (len(returns) == 0) or (len(underwater) == 0) or (np.min(underwater) == 0): break return drawdowns @@ -1021,27 +1007,21 @@ def gen_drawdown_table(returns, top=10): df_drawdowns.loc[i, "Duration"] = len( pd.date_range(peak, recovery, freq="B") ) - df_drawdowns.loc[i, "Peak date"] = peak.to_pydatetime().strftime( - "%Y-%m-%d" - ) - df_drawdowns.loc[i, "Valley date"] = valley.to_pydatetime().strftime( - "%Y-%m-%d" - ) + df_drawdowns.loc[i, "Peak date"] = peak.to_pydatetime().strftime("%Y-%m-%d") + df_drawdowns.loc[i, "Valley date"] = valley.to_pydatetime().strftime("%Y-%m-%d") if isinstance(recovery, float): df_drawdowns.loc[i, "Recovery date"] = recovery else: - df_drawdowns.loc[ - i, "Recovery date" - ] = recovery.to_pydatetime().strftime("%Y-%m-%d") + df_drawdowns.loc[i, "Recovery date"] = recovery.to_pydatetime().strftime( + "%Y-%m-%d" + ) df_drawdowns.loc[i, "Net drawdown in %"] = ( (df_cum.loc[peak] - df_cum.loc[valley]) / df_cum.loc[peak] ) * 100 df_drawdowns["Peak date"] = pd.to_datetime(df_drawdowns["Peak date"]) df_drawdowns["Valley date"] = pd.to_datetime(df_drawdowns["Valley date"]) - df_drawdowns["Recovery date"] = pd.to_datetime( - df_drawdowns["Recovery date"] - ) + df_drawdowns["Recovery date"] = pd.to_datetime(df_drawdowns["Recovery date"]) return df_drawdowns @@ -1064,9 +1044,7 @@ def rolling_volatility(returns, rolling_vol_window): Rolling volatility. 
""" - return returns.rolling(rolling_vol_window).std() * np.sqrt( - APPROX_BDAYS_PER_YEAR - ) + return returns.rolling(rolling_vol_window).std() * np.sqrt(APPROX_BDAYS_PER_YEAR) def rolling_sharpe(returns, rolling_sharpe_window): @@ -1129,9 +1107,7 @@ def simulate_paths( samples = np.empty((num_samples, num_days)) seed = np.random.RandomState(seed=random_seed) for i in range(num_samples): - samples[i, :] = is_returns.sample( - num_days, replace=True, random_state=seed - ) + samples[i, :] = is_returns.sample(num_days, replace=True, random_state=seed) return samples @@ -1163,7 +1139,7 @@ def summarize_paths(samples, cone_std=(1.0, 1.5, 2.0), starting_value=1.0): if isinstance(cone_std, (float, int)): cone_std = [cone_std] - cone_bounds = pd.DataFrame(columns=pd.Float64Index([])) + cone_bounds = pd.DataFrame(columns=pd.Index([], dtype="float64")) for num_std in cone_std: cone_bounds.loc[:, float(num_std)] = cum_mean + cum_std * num_std cone_bounds.loc[:, float(-num_std)] = cum_mean - cum_std * num_std @@ -1250,6 +1226,7 @@ def extract_interesting_date_ranges(returns, periods=None): """ if periods is None: periods = PERIODS + returns_dupe = returns.copy() returns_dupe.index = returns_dupe.index.map(pd.Timestamp) ranges = OrderedDict() diff --git a/pyfolio/txn.py b/src/pyfolio/txn.py similarity index 98% rename from pyfolio/txn.py rename to src/pyfolio/txn.py index 40b06b26..3bb3f3de 100644 --- a/pyfolio/txn.py +++ b/src/pyfolio/txn.py @@ -110,9 +110,7 @@ def get_txn_vol(transactions): return pd.concat([daily_values, daily_amounts], axis=1) -def adjust_returns_for_slippage( - returns, positions, transactions, slippage_bps -): +def adjust_returns_for_slippage(returns, positions, transactions, slippage_bps): """ Apply a slippage penalty for every dollar traded. diff --git a/pyfolio/utils.py b/src/pyfolio/utils.py similarity index 97% rename from pyfolio/utils.py rename to src/pyfolio/utils.py index bb91caab..f3ed5801 100644 --- a/pyfolio/utils.py +++ b/src/pyfolio/utils.py @@ -14,14 +14,14 @@ # limitations under the License. import warnings - from itertools import cycle -from matplotlib.pyplot import cm + +import empyrical.utils import numpy as np import pandas as pd from IPython.display import display, HTML -from distutils.version import StrictVersion -import empyrical.utils +from matplotlib.pyplot import cm +from packaging.version import Version from . import pos from . import txn @@ -68,9 +68,9 @@ "#808080", ] -pandas_version = StrictVersion(pd.__version__) +pandas_version = Version(pd.__version__) -pandas_one_point_one_or_less = pandas_version < StrictVersion("1.2") +pandas_one_point_one_or_less = pandas_version < Version("1.2") def one_dec_places(x, pos): @@ -171,7 +171,7 @@ def extract_rets_pos_txn_from_zipline(backtest): backtest.index = backtest.index.tz_localize("UTC") returns = backtest.returns raw_positions = [] - for dt, pos_row in backtest.positions.iteritems(): + for dt, pos_row in backtest.positions.items(): df = pd.DataFrame(pos_row) df.index = [dt] * len(df) raw_positions.append(df) @@ -186,9 +186,7 @@ def extract_rets_pos_txn_from_zipline(backtest): return returns, positions, transactions -def print_table( - table, name=None, float_format=None, formatters=None, header_rows=None -): +def print_table(table, name=None, float_format=None, formatters=None, header_rows=None): """ Pretty print a pandas DataFrame. 
@@ -517,9 +515,7 @@ def configure_legend( - set colors according to colormap """ chartBox = ax.get_position() - ax.set_position( - [chartBox.x0, chartBox.y0, chartBox.width * 0.75, chartBox.height] - ) + ax.set_position([chartBox.x0, chartBox.y0, chartBox.width * 0.75, chartBox.height]) # make legend order match graph lines handles, labels = ax.get_legend_handles_labels() diff --git a/pyfolio/tests/__init__.py b/tests/__init__.py similarity index 100% rename from pyfolio/tests/__init__.py rename to tests/__init__.py diff --git a/pyfolio/tests/matplotlibrc b/tests/matplotlibrc similarity index 100% rename from pyfolio/tests/matplotlibrc rename to tests/matplotlibrc diff --git a/pyfolio/tests/test_capacity.py b/tests/test_capacity.py similarity index 87% rename from pyfolio/tests/test_capacity.py rename to tests/test_capacity.py index 28149cb5..0cffb826 100644 --- a/pyfolio/tests/test_capacity.py +++ b/tests/test_capacity.py @@ -36,16 +36,12 @@ class CapacityTestCase(TestCase): volume *= 1000000 volume["market_data"] = "volume" - price = pd.DataFrame( - [[1.0, 1.0]] * len(dates), columns=["A", "B"], index=dates - ) + price = pd.DataFrame([[1.0, 1.0]] * len(dates), columns=["A", "B"], index=dates) price.index.name = "dt" price["market_data"] = "price" market_data = ( - pd.concat([volume, price]) - .reset_index() - .set_index(["dt", "market_data"]) + pd.concat([volume, price]).reset_index().set_index(["dt", "market_data"]) ) def test_days_to_liquidate_positions(self): @@ -126,9 +122,7 @@ def test_get_low_liquidity_transactions(self, expected, last_n_days): assert_frame_equal(llt, expected) def test_daily_txns_with_bar_data(self): - daily_txn = daily_txns_with_bar_data( - self.transactions, self.market_data - ) + daily_txn = daily_txns_with_bar_data(self.transactions, self.market_data) columns = ["symbol", "amount", "price", "volume"] expected = pd.DataFrame( @@ -151,19 +145,13 @@ def test_daily_txns_with_bar_data(self): (1000000, 0.1, [0.99995, 0.99999375, 0.999998611]), ] ) - def test_apply_slippage_penalty( - self, starting_base, impact, expected_adj_returns - ): + def test_apply_slippage_penalty(self, starting_base, impact, expected_adj_returns): returns = pd.Series([1.0, 1.0, 1.0], index=self.dates) - daily_txn = daily_txns_with_bar_data( - self.transactions, self.market_data - ) + daily_txn = daily_txns_with_bar_data(self.transactions, self.market_data) adj_returns = apply_slippage_penalty( returns, daily_txn, starting_base, 1000000, impact=impact ) - expected_adj_returns = pd.Series( - expected_adj_returns, index=self.dates - ) + expected_adj_returns = pd.Series(expected_adj_returns, index=self.dates) assert_series_equal(adj_returns, expected_adj_returns) diff --git a/pyfolio/tests/test_data/factor_loadings.csv b/tests/test_data/factor_loadings.csv similarity index 100% rename from pyfolio/tests/test_data/factor_loadings.csv rename to tests/test_data/factor_loadings.csv diff --git a/pyfolio/tests/test_data/factor_returns.csv b/tests/test_data/factor_returns.csv similarity index 100% rename from pyfolio/tests/test_data/factor_returns.csv rename to tests/test_data/factor_returns.csv diff --git a/pyfolio/tests/test_data/intercepts.csv b/tests/test_data/intercepts.csv similarity index 100% rename from pyfolio/tests/test_data/intercepts.csv rename to tests/test_data/intercepts.csv diff --git a/pyfolio/tests/test_data/positions.csv b/tests/test_data/positions.csv similarity index 100% rename from pyfolio/tests/test_data/positions.csv rename to tests/test_data/positions.csv diff --git 
a/pyfolio/tests/test_data/residuals.csv b/tests/test_data/residuals.csv similarity index 100% rename from pyfolio/tests/test_data/residuals.csv rename to tests/test_data/residuals.csv diff --git a/pyfolio/tests/test_data/returns.csv b/tests/test_data/returns.csv similarity index 100% rename from pyfolio/tests/test_data/returns.csv rename to tests/test_data/returns.csv diff --git a/pyfolio/tests/test_data/test_LMCAP.csv b/tests/test_data/test_LMCAP.csv similarity index 100% rename from pyfolio/tests/test_data/test_LMCAP.csv rename to tests/test_data/test_LMCAP.csv diff --git a/pyfolio/tests/test_data/test_LT_MOMENTUM.csv b/tests/test_data/test_LT_MOMENTUM.csv similarity index 100% rename from pyfolio/tests/test_data/test_LT_MOMENTUM.csv rename to tests/test_data/test_LT_MOMENTUM.csv diff --git a/pyfolio/tests/test_data/test_MACDSignal.csv b/tests/test_data/test_MACDSignal.csv similarity index 100% rename from pyfolio/tests/test_data/test_MACDSignal.csv rename to tests/test_data/test_MACDSignal.csv diff --git a/pyfolio/tests/test_data/test_VLTY.csv b/tests/test_data/test_VLTY.csv similarity index 100% rename from pyfolio/tests/test_data/test_VLTY.csv rename to tests/test_data/test_VLTY.csv diff --git a/pyfolio/tests/test_data/test_caps.csv b/tests/test_data/test_caps.csv similarity index 100% rename from pyfolio/tests/test_data/test_caps.csv rename to tests/test_data/test_caps.csv diff --git a/pyfolio/tests/test_data/test_gross_lev.csv.gz b/tests/test_data/test_gross_lev.csv.gz similarity index 100% rename from pyfolio/tests/test_data/test_gross_lev.csv.gz rename to tests/test_data/test_gross_lev.csv.gz diff --git a/pyfolio/tests/test_data/test_pos.csv.gz b/tests/test_data/test_pos.csv.gz similarity index 100% rename from pyfolio/tests/test_data/test_pos.csv.gz rename to tests/test_data/test_pos.csv.gz diff --git a/pyfolio/tests/test_data/test_returns.csv.gz b/tests/test_data/test_returns.csv.gz similarity index 100% rename from pyfolio/tests/test_data/test_returns.csv.gz rename to tests/test_data/test_returns.csv.gz diff --git a/pyfolio/tests/test_data/test_sectors.csv b/tests/test_data/test_sectors.csv similarity index 100% rename from pyfolio/tests/test_data/test_sectors.csv rename to tests/test_data/test_sectors.csv diff --git a/pyfolio/tests/test_data/test_shares_held.csv b/tests/test_data/test_shares_held.csv similarity index 100% rename from pyfolio/tests/test_data/test_shares_held.csv rename to tests/test_data/test_shares_held.csv diff --git a/pyfolio/tests/test_data/test_txn.csv.gz b/tests/test_data/test_txn.csv.gz similarity index 100% rename from pyfolio/tests/test_data/test_txn.csv.gz rename to tests/test_data/test_txn.csv.gz diff --git a/pyfolio/tests/test_data/test_volumes.csv b/tests/test_data/test_volumes.csv similarity index 100% rename from pyfolio/tests/test_data/test_volumes.csv rename to tests/test_data/test_volumes.csv diff --git a/pyfolio/tests/test_perf_attrib.py b/tests/test_perf_attrib.py similarity index 88% rename from pyfolio/tests/test_perf_attrib.py rename to tests/test_perf_attrib.py index 34886ea7..7e5163d0 100644 --- a/pyfolio/tests/test_perf_attrib.py +++ b/tests/test_perf_attrib.py @@ -2,7 +2,7 @@ import pandas as pd import unittest import warnings - +from pathlib import Path import empyrical as ep from pyfolio.perf_attrib import ( perf_attrib, @@ -10,6 +10,8 @@ _cumulative_returns_less_costs, ) +TEST_DATA = Path(__file__).parent / "test_data" + def _empyrical_compat_perf_attrib_result(index, columns, data): if ep.__version__ < "0.5.2": @@ -27,9 +29,7 @@ 
def _empyrical_compat_perf_attrib_result(index, columns, data): return pd.DataFrame(index=index, columns=columns, data=data) -def generate_toy_risk_model_output( - start_date="2017-01-01", periods=10, num_styles=2 -): +def generate_toy_risk_model_output(start_date="2017-01-01", periods=10, num_styles=2): """ Generate toy risk model output. @@ -128,9 +128,7 @@ def test_perf_attrib_simple(self): data={"stock1": [20, 20], "stock2": [50, 50], "cash": [0, 0]}, ) - index = pd.MultiIndex.from_product( - [dts, tickers], names=["dt", "ticker"] - ) + index = pd.MultiIndex.from_product([dts, tickers], names=["dt", "ticker"]) factor_loadings = pd.DataFrame( columns=styles, @@ -173,13 +171,9 @@ def test_perf_attrib_simple(self): returns, positions, factor_returns, factor_loadings ) - pd.util.testing.assert_frame_equal( - expected_perf_attrib_output, perf_attrib_output - ) + pd.testing.assert_frame_equal(expected_perf_attrib_output, perf_attrib_output) - pd.util.testing.assert_frame_equal( - expected_exposures_portfolio, exposures_portfolio - ) + pd.testing.assert_frame_equal(expected_exposures_portfolio, exposures_portfolio) # test long and short positions positions = pd.DataFrame( @@ -219,13 +213,9 @@ def test_perf_attrib_simple(self): data={"risk_factor1": [0.0, 0.0], "risk_factor2": [0.0, 0.0]}, ) - pd.util.testing.assert_frame_equal( - expected_perf_attrib_output, perf_attrib_output - ) + pd.testing.assert_frame_equal(expected_perf_attrib_output, perf_attrib_output) - pd.util.testing.assert_frame_equal( - expected_exposures_portfolio, exposures_portfolio - ) + pd.testing.assert_frame_equal(expected_exposures_portfolio, exposures_portfolio) perf_attrib_summary, exposures_summary = create_perf_attrib_stats( perf_attrib_output, exposures_portfolio @@ -241,7 +231,7 @@ def test_perf_attrib_simple(self): perf_attrib_summary["Total Returns"], ) - pd.util.testing.assert_frame_equal( + pd.testing.assert_frame_equal( exposures_summary, pd.DataFrame( 0.0, @@ -257,7 +247,7 @@ def test_perf_attrib_simple(self): def test_perf_attrib_regression(self): positions = pd.read_csv( - "pyfolio/tests/test_data/positions.csv", + TEST_DATA / "positions.csv", index_col=0, parse_dates=True, ) @@ -267,27 +257,26 @@ def test_perf_attrib_regression(self): ] returns = pd.read_csv( - "pyfolio/tests/test_data/returns.csv", + TEST_DATA / "returns.csv", index_col=0, parse_dates=True, header=None, - squeeze=True, - ) + ).squeeze() factor_loadings = pd.read_csv( - "pyfolio/tests/test_data/factor_loadings.csv", + TEST_DATA / "factor_loadings.csv", index_col=[0, 1], parse_dates=True, ) factor_returns = pd.read_csv( - "pyfolio/tests/test_data/factor_returns.csv", + TEST_DATA / "factor_returns.csv", index_col=0, parse_dates=True, ) residuals = pd.read_csv( - "pyfolio/tests/test_data/residuals.csv", + TEST_DATA / "residuals.csv", index_col=0, parse_dates=True, ) @@ -295,11 +284,10 @@ def test_perf_attrib_regression(self): residuals.columns = [int(col) for col in residuals.columns] intercepts = pd.read_csv( - "pyfolio/tests/test_data/intercepts.csv", + TEST_DATA / "intercepts.csv", index_col=0, header=None, - squeeze=True, - ) + ).squeeze() risk_exposures_portfolio, perf_attrib_output = perf_attrib( returns, @@ -314,16 +302,12 @@ def test_perf_attrib_regression(self): # since all returns are factor returns, common returns should be # equivalent to total returns, and specific returns should be 0 - pd.util.testing.assert_series_equal( - returns, common_returns, check_names=False - ) + pd.testing.assert_series_equal(returns, common_returns, 
check_names=False) self.assertTrue(np.isclose(specific_returns, 0).all()) # specific and common returns combined should equal total returns - pd.util.testing.assert_series_equal( - returns, combined_returns, check_names=False - ) + pd.testing.assert_series_equal(returns, combined_returns, check_names=False) # check that residuals + intercepts = specific returns self.assertTrue(np.isclose((residuals + intercepts), 0).all()) @@ -333,13 +317,13 @@ def test_perf_attrib_regression(self): factor_returns, axis="rows" ).sum(axis="columns") - pd.util.testing.assert_series_equal( + pd.testing.assert_series_equal( expected_common_returns, common_returns, check_names=False ) # since factor loadings are ones, portfolio risk exposures # should be ones - pd.util.testing.assert_frame_equal( + pd.testing.assert_frame_equal( risk_exposures_portfolio, pd.DataFrame( np.ones_like(risk_exposures_portfolio), @@ -390,7 +374,7 @@ def test_perf_attrib_regression(self): avg_factor_exposure = risk_exposures_portfolio.mean().rename( "Average Risk Factor Exposure" ) - pd.util.testing.assert_series_equal( + pd.testing.assert_series_equal( avg_factor_exposure, exposures_summary["Average Risk Factor Exposure"], ) @@ -404,7 +388,7 @@ def test_perf_attrib_regression(self): index=risk_exposures_portfolio.columns, ) - pd.util.testing.assert_series_equal( + pd.testing.assert_series_equal( cumulative_returns_by_factor, exposures_summary["Cumulative Return"], ) @@ -418,7 +402,7 @@ def test_perf_attrib_regression(self): index=risk_exposures_portfolio.columns, ) - pd.util.testing.assert_series_equal( + pd.testing.assert_series_equal( annualized_returns_by_factor, exposures_summary["Annualized Return"], ) @@ -433,9 +417,7 @@ def test_missing_stocks_and_dates(self): ) = generate_toy_risk_model_output() # factor loadings missing a stock should raise a warning - factor_loadings_missing_stocks = factor_loadings.drop( - "TLT", level="ticker" - ) + factor_loadings_missing_stocks = factor_loadings.drop("TLT", level="ticker") with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always", UserWarning) @@ -450,13 +432,10 @@ def test_missing_stocks_and_dates(self): w_ = [warn for warn in w if issubclass(warn.category, UserWarning)] self.assertEqual(len(w_), 1) self.assertIn( - "The following assets were missing factor loadings: " - "['TLT']", + "The following assets were missing factor loadings: " "['TLT']", str(w_[-1].message), ) - self.assertIn( - "Ratio of assets missing: 0.333", str(w_[-1].message) - ) + self.assertIn("Ratio of assets missing: 0.333", str(w_[-1].message)) # missing dates should raise a warning missing_dates = ["2017-01-01", "2017-01-05"] @@ -513,13 +492,10 @@ def test_missing_stocks_and_dates(self): w_ = [warn for warn in w if issubclass(warn.category, UserWarning)] self.assertEqual(len(w_), 5) self.assertIn( - "The following assets were missing factor loadings: " - "['TLT']", + "The following assets were missing factor loadings: " "['TLT']", str(w_[-2].message), ) - self.assertIn( - "Ratio of assets missing: 0.333", str(w_[-2].message) - ) + self.assertIn("Ratio of assets missing: 0.333", str(w_[-2].message)) self.assertIn( "Could not find factor loadings for " @@ -580,19 +556,17 @@ def test_high_turnover_warning(self): def test_cumulative_returns_less_costs(self): - returns = pd.Series( - [0.1] * 3, index=pd.date_range("2017-01-01", periods=3) - ) + returns = pd.Series([0.1] * 3, index=pd.date_range("2017-01-01", periods=3)) cost = pd.Series([0.001] * len(returns), index=returns.index) expected_returns 
= pd.Series([0.1, 0.21, 0.331], index=returns.index) - pd.util.testing.assert_series_equal( + pd.testing.assert_series_equal( expected_returns, _cumulative_returns_less_costs(returns, None) ) expected_returns = pd.Series( [0.099000, 0.207801, 0.327373], index=returns.index ) - pd.util.testing.assert_series_equal( + pd.testing.assert_series_equal( expected_returns, _cumulative_returns_less_costs(returns, cost) ) diff --git a/pyfolio/tests/test_pos.py b/tests/test_pos.py similarity index 96% rename from pyfolio/tests/test_pos.py rename to tests/test_pos.py index 824d6def..8bcff74a 100644 --- a/pyfolio/tests/test_pos.py +++ b/tests/test_pos.py @@ -152,9 +152,7 @@ def test_sector_exposure( with warnings.catch_warnings(record=True) as w: result_sector_exposure = get_sector_exposures(positions, mapping) - assert_frame_equal( - result_sector_exposure, expected_sector_exposure - ) + assert_frame_equal(result_sector_exposure, expected_sector_exposure) # avoids test failure due to DeprecationWarning for pandas>=1.0, <1.1 w_ = [warn for warn in w if issubclass(warn.category, UserWarning)] if warning_expected: @@ -262,9 +260,7 @@ def test_detect_intraday(self, positions, transactions, expected): (False, test_returns, test_pos, test_txn, test_pos), ] ) - def test_check_intraday( - self, estimate, returns, positions, transactions, expected - ): + def test_check_intraday(self, estimate, returns, positions, transactions, expected): detected = check_intraday(estimate, returns, positions, transactions) assert_frame_equal(detected, expected) @@ -279,8 +275,6 @@ def test_check_intraday( ), ] ) - def test_estimate_intraday( - self, returns, positions, transactions, expected - ): + def test_estimate_intraday(self, returns, positions, transactions, expected): intraday_pos = estimate_intraday(returns, positions, transactions) assert intraday_pos.shape == expected diff --git a/pyfolio/tests/test_round_trips.py b/tests/test_round_trips.py similarity index 96% rename from pyfolio/tests/test_round_trips.py rename to tests/test_round_trips.py index 4311a60d..fa789a49 100644 --- a/pyfolio/tests/test_round_trips.py +++ b/tests/test_round_trips.py @@ -248,12 +248,8 @@ def test_groupby_consecutive(self, transactions, expected): ), ] ) - def test_extract_round_trips( - self, transactions, expected, portfolio_value=None - ): - round_trips = extract_round_trips( - transactions, portfolio_value=portfolio_value - ) + def test_extract_round_trips(self, transactions, expected, portfolio_value=None): + round_trips = extract_round_trips(transactions, portfolio_value=portfolio_value) assert_frame_equal( round_trips.sort_index(axis="columns"), @@ -273,9 +269,7 @@ def test_add_closing_trades(self): index=dates[:3], ) - expected_ix = dates[:3].append( - DatetimeIndex([dates[2] + Timedelta(seconds=1)]) - ) + expected_ix = dates[:3].append(DatetimeIndex([dates[2] + Timedelta(seconds=1)])) expected = DataFrame( data=[ ["A", 2, 10], diff --git a/pyfolio/tests/test_tears.py b/tests/test_tears.py similarity index 59% rename from pyfolio/tests/test_tears.py rename to tests/test_tears.py index 31eee0e7..e1d1894d 100644 --- a/pyfolio/tests/test_tears.py +++ b/tests/test_tears.py @@ -1,14 +1,17 @@ -from matplotlib.testing.decorators import cleanup - -from unittest import TestCase -from parameterized import parameterized - -import os +import functools +from pathlib import Path import gzip +import inspect +import os +import warnings +from contextlib import contextmanager +from unittest import TestCase +import matplotlib +import matplotlib.pyplot 
as plt from pandas import read_csv +from parameterized import parameterized -from pyfolio.utils import to_utc, to_series from pyfolio.tears import ( create_full_tear_sheet, create_simple_tear_sheet, @@ -18,29 +21,84 @@ create_round_trip_tear_sheet, create_interesting_times_tear_sheet, ) +from pyfolio.utils import to_utc, to_series + + +@contextmanager +def _cleanup_cm(): + orig_units_registry = matplotlib.units.registry.copy() + try: + with warnings.catch_warnings(), matplotlib.rc_context(): + yield + finally: + matplotlib.units.registry.clear() + matplotlib.units.registry.update(orig_units_registry) + plt.close("all") + + +def cleanup(style=None): + """ + A decorator to ensure that any global state is reset before + running a test. + + Parameters + ---------- + style : str, dict, or list, optional + The style(s) to apply. Defaults to ``["classic", + "_classic_test_patch"]``. + """ + + # If cleanup is used without arguments, *style* will be a callable, and we + # pass it directly to the wrapper generator. If cleanup is called with an + # argument, it is a string naming a style, and the function will be passed + # as an argument to what we return. This is a confusing, but somewhat + # standard, pattern for writing a decorator with optional arguments. + + def make_cleanup(func): + if inspect.isgeneratorfunction(func): + + @functools.wraps(func) + def wrapped_callable(*args, **kwargs): + with _cleanup_cm(), matplotlib.style.context(style): + yield from func(*args, **kwargs) + + else: + + @functools.wraps(func) + def wrapped_callable(*args, **kwargs): + with _cleanup_cm(), matplotlib.style.context(style): + func(*args, **kwargs) + + return wrapped_callable + + if callable(style): + result = make_cleanup(style) + # Default of mpl_test_settings fixture and image_comparison too. + style = ["classic", "_classic_test_patch"] + return result + else: + return make_cleanup class PositionsTestCase(TestCase): - __location__ = os.path.realpath( - os.path.join(os.getcwd(), os.path.dirname(__file__)) - ) + TEST_DATA = Path(__file__).parent / "test_data" test_returns = read_csv( - gzip.open(__location__ + "/test_data/test_returns.csv.gz"), + gzip.open(TEST_DATA / "test_returns.csv.gz"), index_col=0, parse_dates=True, ) test_returns = to_series(to_utc(test_returns)) test_txn = to_utc( read_csv( - gzip.open(__location__ + "/test_data/test_txn.csv.gz"), + gzip.open(TEST_DATA / "test_txn.csv.gz"), index_col=0, parse_dates=True, ) ) test_pos = to_utc( read_csv( - gzip.open(__location__ + "/test_data/test_pos.csv.gz"), + gzip.open(TEST_DATA / "test_pos.csv.gz"), index_col=0, parse_dates=True, ) @@ -117,9 +175,7 @@ def test_create_position_tear_sheet_breakdown(self, kwargs): ) @cleanup def test_create_txn_tear_sheet_breakdown(self, kwargs): - create_txn_tear_sheet( - self.test_returns, self.test_pos, self.test_txn, **kwargs - ) + create_txn_tear_sheet(self.test_returns, self.test_pos, self.test_txn, **kwargs) @parameterized.expand( [ diff --git a/pyfolio/tests/test_timeseries.py b/tests/test_timeseries.py similarity index 89% rename from pyfolio/tests/test_timeseries.py rename to tests/test_timeseries.py index 0ca27e12..c8fc6301 100644 --- a/pyfolio/tests/test_timeseries.py +++ b/tests/test_timeseries.py @@ -6,7 +6,7 @@ import numpy as np import pandas as pd -from ..
import timeseries +from pyfolio import timeseries from pyfolio.utils import to_utc, to_series, pandas_one_point_one_or_less import gzip @@ -93,26 +93,18 @@ def test_gen_drawdown_table_relative( np.round(drawdowns.loc[0, "Net drawdown in %"]), first_net_drawdown ) self.assertEqual(drawdowns.loc[0, "Peak date"], first_expected_peak) - self.assertEqual( - drawdowns.loc[0, "Valley date"], first_expected_valley - ) - self.assertEqual( - drawdowns.loc[0, "Recovery date"], first_expected_recovery - ) + self.assertEqual(drawdowns.loc[0, "Valley date"], first_expected_valley) + self.assertEqual(drawdowns.loc[0, "Recovery date"], first_expected_recovery) self.assertEqual( np.round(drawdowns.loc[1, "Net drawdown in %"]), second_net_drawdown, ) self.assertEqual(drawdowns.loc[1, "Peak date"], second_expected_peak) - self.assertEqual( - drawdowns.loc[1, "Valley date"], second_expected_valley - ) + self.assertEqual(drawdowns.loc[1, "Valley date"], second_expected_valley) self.assertTrue(pd.isnull(drawdowns.loc[1, "Recovery date"])) - px_list_1 = ( - np.array([100, 120, 100, 80, 70, 110, 180, 150]) / 100.0 - ) # Simple + px_list_1 = np.array([100, 120, 100, 80, 70, 110, 180, 150]) / 100.0 # Simple px_list_2 = ( np.array([100, 120, 100, 80, 70, 80, 90, 90]) / 100.0 ) # Ends in drawdown @@ -141,14 +133,12 @@ def test_get_max_drawdown( peak, valley, recovery = timeseries.get_max_drawdown(rets) # Need to use isnull because the result can be NaN, NaT, etc. - self.assertTrue( - pd.isnull(peak) - ) if expected_peak is None else self.assertEqual(peak, expected_peak) + self.assertTrue(pd.isnull(peak)) if expected_peak is None else self.assertEqual( + peak, expected_peak + ) self.assertTrue( pd.isnull(valley) - ) if expected_valley is None else self.assertEqual( - valley, expected_valley - ) + ) if expected_valley is None else self.assertEqual(valley, expected_valley) self.assertTrue( pd.isnull(recovery) ) if expected_recovery is None else self.assertEqual( @@ -212,9 +202,9 @@ def test_drawdown_overlaps(self): rand.standard_t(3.1, n_samples), pd.date_range("2005-01-02", periods=n_samples), ) - spy_drawdowns = timeseries.gen_drawdown_table( - spy_returns, top=20 - ).sort_values(by="Peak date") + spy_drawdowns = timeseries.gen_drawdown_table(spy_returns, top=20).sort_values( + by="Peak date" + ) # Compare the recovery date of each drawdown with the peak of the next # Last pair might contain a NaT if drawdown didn't finish, so ignore it pairs = list( @@ -244,17 +234,13 @@ def test_drawdown_overlaps(self): ] ) def test_top_drawdowns(self, returns, top, expected): - self.assertEqual( - timeseries.get_top_drawdowns(returns, top=top), expected - ) + self.assertEqual(timeseries.get_top_drawdowns(returns, top=top), expected) class TestVariance(TestCase): @parameterized.expand([(1e7, 0.5, 1, 1, -10000000.0)]) def test_var_cov_var_normal(self, P, c, mu, sigma, expected): - self.assertEqual( - timeseries.var_cov_var_normal(P, c, mu, sigma), expected - ) + self.assertEqual(timeseries.var_cov_var_normal(P, c, mu, sigma), expected) class TestNormalize(TestCase): @@ -307,14 +293,10 @@ class TestStats(TestCase): ) ] ) - @skipIf( - pandas_one_point_one_or_less, "pandas<1.2 returns np.inf not np.nan" - ) + @skipIf(pandas_one_point_one_or_less, "pandas<1.2 returns np.inf not np.nan") def test_sharpe_2(self, returns, rolling_sharpe_window, expected): np.testing.assert_array_almost_equal( - timeseries.rolling_sharpe( - returns, rolling_sharpe_window - ).to_numpy(), + timeseries.rolling_sharpe(returns,
rolling_sharpe_window).to_numpy(), np.asarray(expected), ) @@ -342,7 +324,7 @@ def test_bootstrap_cone_against_linear_cone_normal_returns(self): midline = np.cumprod(1 + (rets.mean() * np.ones(days_forward))) stdev = rets.std() * midline * np.sqrt(np.arange(days_forward) + 1) - normal_cone = pd.DataFrame(columns=pd.Float64Index([])) + normal_cone = pd.DataFrame(columns=pd.Index([], dtype="float64")) for s in cone_stdevs: normal_cone[s] = midline + s * stdev normal_cone[-s] = midline - s * stdev @@ -356,7 +338,7 @@ def test_bootstrap_cone_against_linear_cone_normal_returns(self): num_samples=10000, ) - for col, vals in bootstrap_cone.iteritems(): + for col, vals in bootstrap_cone.items(): expected = normal_cone[col].values assert_allclose(vals.values, expected, rtol=0.005) @@ -396,8 +378,7 @@ def test_calc_bootstrap(self, true_mean, true_sd, n): np.std(samples), sd_of_mean, 3, - "SD of bootstrap does not match theoretical SD of" - "sampling distribution", + "SD of bootstrap does not match theoretical SD of sampling distribution", ) diff --git a/pyfolio/tests/test_txn.py b/tests/test_txn.py similarity index 96% rename from pyfolio/tests/test_txn.py rename to tests/test_txn.py index d78c09a7..c082bc30 100644 --- a/pyfolio/tests/test_txn.py +++ b/tests/test_txn.py @@ -57,9 +57,7 @@ def test_get_turnover(self): # Our portfolio value alternates between $20 and $50 so turnover # should alternate between 20/20 = 1.0 and 20/50 = 0.4. - expected = pd.Series( - [0.4, 1.0] * (int((len(dates) - 1) / 2) + 1), index=dates - ) + expected = pd.Series([0.4, 1.0] * (int((len(dates) - 1) / 2) + 1), index=dates) assert_series_equal(result, expected) diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 08c7482c..00000000 --- a/tox.ini +++ /dev/null @@ -1,45 +0,0 @@ -[tox] -envlist = py{37,38}-pandas{10,11,12}, py39-pandas{11,12} -isolated_build = True -skip_missing_interpreters = True -requires = setuptools >=42.0.0 - pip >=21.0 - wheel >0.36.0 - tox-gh-actions - -[gh-actions] -python = - 3.7: py37 - 3.8: py38 - 3.9: py39 - -[flake8] -max-line-length = 79 -ignore = E203, E266, E501, W503, F403, F401 -max-complexity = 18 -select = B,C,E,F,W,T4,B9 -exclude = - .git, - __pycache__, - docs/source/conf.py, - versioneer.py, - pyfolio/_version.py - -[testenv] -setenv = - MPLBACKEND = Agg - COVERAGE_FILE=.coverage.{envname} - -deps = - pytest - parameterized - pytest-cov - coverage - flake8 - black - -commands = - py{37,38}-pandas10: pip install -vv pandas>=1.0.0,<1.1.0 - py{37,38,39}-pandas11: pip install -vv pandas>=1.1.0,<1.2.0 - py{37,38,39}-pandas12: pip install -vv pandas>=1.2.0 - pytest --cov=pytest pyfolio/tests diff --git a/versioneer.py b/versioneer.py deleted file mode 100644 index 47622bb5..00000000 --- a/versioneer.py +++ /dev/null @@ -1,1761 +0,0 @@ -# Version: 0.15 - -""" -The Versioneer -============== - -* like a rocketeer, but for versions! -* https://github.com/warner/python-versioneer -* Brian Warner -* License: Public Domain -* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy -* [![Latest Version] -(https://pypip.in/version/versioneer/badge.svg?style=flat) -](https://pypi.python.org/pypi/versioneer/) -* [![Build Status] -(https://travis-ci.org/warner/python-versioneer.png?branch=master) -](https://travis-ci.org/warner/python-versioneer) - -This is a tool for managing a recorded version number in distutils-based -python projects. The goal is to remove the tedious and error-prone "update -the embedded version string" step from your release process.
Making a new -release should be as easy as recording a new tag in your version-control -system, and maybe making new tarballs. - - -## Quick Install - -* `pip install versioneer` to somewhere to your $PATH -* add a `[versioneer]` section to your setup.cfg (see below) -* run `versioneer install` in your source tree, commit the results - -## Version Identifiers - -Source trees come from a variety of places: - -* a version-control system checkout (mostly used by developers) -* a nightly tarball, produced by build automation -* a snapshot tarball, produced by a web-based VCS browser, like github's - "tarball from tag" feature -* a release tarball, produced by "setup.py sdist", distributed through PyPI - -Within each source tree, the version identifier (either a string or a number, -this tool is format-agnostic) can come from a variety of places: - -* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows - about recent "tags" and an absolute revision-id -* the name of the directory into which the tarball was unpacked -* an expanded VCS keyword ($Id$, etc) -* a `_version.py` created by some earlier build step - -For released software, the version identifier is closely related to a VCS -tag. Some projects use tag names that include more than just the version -string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool -needs to strip the tag prefix to extract the version identifier. For -unreleased software (between tags), the version identifier should provide -enough information to help developers recreate the same tree, while also -giving them an idea of roughly how old the tree is (after version 1.2, before -version 1.3). Many VCS systems can report a description that captures this, -for example `git describe --tags --dirty --always` reports things like -"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the -0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has -uncommitted changes. - -The version identifier is used for multiple purposes: - -* to allow the module to self-identify its version: `myproject.__version__` -* to choose a name and prefix for a 'setup.py sdist' tarball - -## Theory of Operation - -Versioneer works by adding a special `_version.py` file into your source -tree, where your `__init__.py` can import it. This `_version.py` knows how to -dynamically ask the VCS tool for version information at import time. - -`_version.py` also contains `$Revision$` markers, and the installation -process marks `_version.py` to have this marker rewritten with a tag name -during the `git archive` command. As a result, generated tarballs will -contain enough information to get the proper version. - -To allow `setup.py` to compute a version too, a `versioneer.py` is added to -the top level of your source tree, next to `setup.py` and the `setup.cfg` -that configures it. This overrides several distutils/setuptools commands to -compute the version when invoked, and changes `setup.py build` and `setup.py -sdist` to replace `_version.py` with a small static file that contains just -the generated version data. - -## Installation - -First, decide on values for the following configuration variables: - -* `VCS`: the version control system you use. Currently accepts "git". - -* `style`: the style of version string to be produced. See "Styles" below for - details. Defaults to "pep440", which looks like - `TAG[+DISTANCE.gSHORTHASH[.dirty]]`. 
- -* `versionfile_source`: - - A project-relative pathname into which the generated version strings should - be written. This is usually a `_version.py` next to your project's main - `__init__.py` file, so it can be imported at runtime. If your project uses - `src/myproject/__init__.py`, this should be `src/myproject/_version.py`. - This file should be checked in to your VCS as usual: the copy created below - by `setup.py setup_versioneer` will include code that parses expanded VCS - keywords in generated tarballs. The 'build' and 'sdist' commands will - replace it with a copy that has just the calculated version string. - - This must be set even if your project does not have any modules (and will - therefore never import `_version.py`), since "setup.py sdist" -based trees - still need somewhere to record the pre-calculated version strings. Anywhere - in the source tree should do. If there is a `__init__.py` next to your - `_version.py`, the `setup.py setup_versioneer` command (described below) - will append some `__version__`-setting assignments, if they aren't already - present. - -* `versionfile_build`: - - Like `versionfile_source`, but relative to the build directory instead of - the source directory. These will differ when your setup.py uses - 'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`, - then you will probably have `versionfile_build='myproject/_version.py'` and - `versionfile_source='src/myproject/_version.py'`. - - If this is set to None, then `setup.py build` will not attempt to rewrite - any `_version.py` in the built tree. If your project does not have any - libraries (e.g. if it only builds a script), then you should use - `versionfile_build = None` and override `distutils.command.build_scripts` - to explicitly insert a copy of `versioneer.get_version()` into your - generated script. - -* `tag_prefix`: - - a string, like 'PROJECTNAME-', which appears at the start of all VCS tags. - If your tags look like 'myproject-1.2.0', then you should use - tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this - should be an empty string. - -* `parentdir_prefix`: - - a optional string, frequently the same as tag_prefix, which appears at the - start of all unpacked tarball filenames. If your tarball unpacks into - 'myproject-1.2.0', this should be 'myproject-'. To disable this feature, - just omit the field from your `setup.cfg`. - -This tool provides one script, named `versioneer`. That script has one mode, -"install", which writes a copy of `versioneer.py` into the current directory -and runs `versioneer.py setup` to finish the installation. - -To versioneer-enable your project: - -* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and - populating it with the configuration values you decided earlier (note that - the option names are not case-sensitive): - - ```` - [versioneer] - VCS = git - style = pep440 - versionfile_source = src/myproject/_version.py - versionfile_build = myproject/_version.py - tag_prefix = "" - parentdir_prefix = myproject- - ```` - -* 2: Run `versioneer install`. 
This will do the following: - - * copy `versioneer.py` into the top of your source tree - * create `_version.py` in the right place (`versionfile_source`) - * modify your `__init__.py` (if one exists next to `_version.py`) to define - `__version__` (by calling a function from `_version.py`) - * modify your `MANIFEST.in` to include both `versioneer.py` and the - generated `_version.py` in sdist tarballs - - `versioneer install` will complain about any problems it finds with your - `setup.py` or `setup.cfg`. Run it multiple times until you have fixed all - the problems. - -* 3: add a `import versioneer` to your setup.py, and add the following - arguments to the setup() call: - - version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), - -* 4: commit these changes to your VCS. To make sure you won't forget, - `versioneer install` will mark everything it touched for addition using - `git add`. Don't forget to add `setup.py` and `setup.cfg` too. - -## Post-Installation Usage - -Once established, all uses of your tree from a VCS checkout should get the -current version string. All generated tarballs should include an embedded -version string (so users who unpack them will not need a VCS tool installed). - -If you distribute your project through PyPI, then the release process should -boil down to two steps: - -* 1: git tag 1.0 -* 2: python setup.py register sdist upload - -If you distribute it through github (i.e. users use github to generate -tarballs with `git archive`), the process is: - -* 1: git tag 1.0 -* 2: git push; git push --tags - -Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at -least one tag in its history. - -## Version-String Flavors - -Code which uses Versioneer can learn about its version string at runtime by -importing `_version` from your main `__init__.py` file and running the -`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can -import the top-level `versioneer.py` and run `get_versions()`. - -Both functions return a dictionary with different flavors of version -information: - -* `['version']`: A condensed version string, rendered using the selected - style. This is the most commonly used value for the project's version - string. The default "pep440" style yields strings like `0.11`, - `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section - below for alternative styles. - -* `['full-revisionid']`: detailed revision identifier. For Git, this is the - full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". - -* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that - this is only accurate if run in a VCS checkout, otherwise it is likely to - be False or None - -* `['error']`: if the version string could not be computed, this will be set - to a string describing the problem, otherwise it will be None. It may be - useful to throw an exception in setup.py if this is set, to avoid e.g. - creating tarballs with a version string of "unknown". - -Some variants are more useful than others. Including `full-revisionid` in a -bug report should allow developers to reconstruct the exact code being tested -(or indicate the presence of local changes that should be shared with the -developers). `version` is suitable for display in an "about" box or a CLI -`--version` output: it can be easily compared against release notes and lists -of bugs fixed in various releases. 
- -The installer adds the following text to your `__init__.py` to place a basic -version in `YOURPROJECT.__version__`: - - from ._version import get_versions - __version__ = get_versions()['version'] - del get_versions - -## Styles - -The setup.cfg `style=` configuration controls how the VCS information is -rendered into a version string. - -The default style, "pep440", produces a PEP440-compliant string, equal to the -un-prefixed tag name for actual releases, and containing an additional "local -version" section with more detail for in-between builds. For Git, this is -TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags ---dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the -tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and -that this commit is two revisions ("+2") beyond the "0.11" tag. For released -software (exactly equal to a known tag), the identifier will only contain the -stripped tag, e.g. "0.11". - -Other styles are available. See details.md in the Versioneer source tree for -descriptions. - -## Debugging - -Versioneer tries to avoid fatal errors: if something goes wrong, it will tend -to return a version of "0+unknown". To investigate the problem, run `setup.py -version`, which will run the version-lookup code in a verbose mode, and will -display the full contents of `get_versions()` (including the `error` string, -which may help identify what went wrong). - -## Updating Versioneer - -To upgrade your project to a new release of Versioneer, do the following: - -* install the new Versioneer (`pip install -U versioneer` or equivalent) -* edit `setup.cfg`, if necessary, to include any new configuration settings - indicated by the release notes -* re-run `versioneer install` in your source tree, to replace - `SRC/_version.py` -* commit any changed files - -### Upgrading to 0.15 - -Starting with this version, Versioneer is configured with a `[versioneer]` -section in your `setup.cfg` file. Earlier versions required the `setup.py` to -set attributes on the `versioneer` module immediately after import. The new -version will refuse to run (raising an exception during import) until you -have provided the necessary `setup.cfg` section. - -In addition, the Versioneer package provides an executable named -`versioneer`, and the installation process is driven by running `versioneer -install`. In 0.14 and earlier, the executable was named -`versioneer-installer` and was run without an argument. - -### Upgrading to 0.14 - -0.14 changes the format of the version string. 0.13 and earlier used -hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a -plus-separated "local version" section strings, with dot-separated -components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old -format, but should be ok with the new one. - -### Upgrading from 0.11 to 0.12 - -Nothing special. - -### Upgrading from 0.10 to 0.11 - -You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running -`setup.py setup_versioneer`. This will enable the use of additional -version-control systems (SVN, etc) in the future. - -## Future Directions - -This tool is designed to make it easily extended to other version-control -systems: all VCS-specific components are in separate directories like -src/git/ . The top-level `versioneer.py` script is assembled from these -components by running make-versioneer.py . 
In the future, make-versioneer.py -will take a VCS name as an argument, and will construct a version of -`versioneer.py` that is specific to the given VCS. It might also take the -configuration arguments that are currently provided manually during -installation by editing setup.py . Alternatively, it might go the other -direction and include code from all supported VCS systems, reducing the -number of intermediate scripts. - - -## License - -To make Versioneer easier to embed, all its code is hereby released into the -public domain. The `_version.py` that it creates is also in the public -domain. - -""" - -from __future__ import print_function - -try: - import configparser -except ImportError: - import ConfigParser as configparser -import errno -import json -import os -import re -import subprocess -import sys - - -class VersioneerConfig: - pass - - -def get_root(): - # we require that all commands are run from the project root, i.e. the - # directory that contains setup.py, setup.cfg, and versioneer.py . - root = os.path.realpath(os.path.abspath(os.getcwd())) - setup_py = os.path.join(root, "setup.py") - versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - # allow 'python path/to/setup.py COMMAND' - root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) - setup_py = os.path.join(root, "setup.py") - versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - err = ( - "Versioneer was unable to run the project root directory. " - "Versioneer requires setup.py to be executed from " - "its immediate directory (like 'python setup.py COMMAND'), " - "or in a way that lets it use sys.argv[0] to find the root " - "(like 'python path/to/setup.py COMMAND')." - ) - raise VersioneerBadRootError(err) - try: - # Certain runtime workflows (setup.py install/develop in a setuptools - # tree) execute all dependencies in a single python process, so - # "versioneer" may be imported multiple times, and python's shared - # module-import table will cache the first one. So we can't use - # os.path.dirname(__file__), as that will find whichever - # versioneer.py was first imported, even in later projects. - me = os.path.realpath(os.path.abspath(__file__)) - if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]: - print( - "Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py) - ) - except NameError: - pass - return root - - -def get_config_from_root(root): - # This might raise EnvironmentError (if setup.cfg is missing), or - # configparser.NoSectionError (if it lacks a [versioneer] section), or - # configparser.NoOptionError (if it lacks "VCS="). See the docstring at - # the top of versioneer.py for instructions on writing your setup.cfg . 
- setup_cfg = os.path.join(root, "setup.cfg") - parser = configparser.SafeConfigParser() - with open(setup_cfg, "r") as f: - parser.readfp(f) - VCS = parser.get("versioneer", "VCS") # mandatory - - def get(parser, name): - if parser.has_option("versioneer", name): - return parser.get("versioneer", name) - return None - - cfg = VersioneerConfig() - cfg.VCS = VCS - cfg.style = get(parser, "style") or "" - cfg.versionfile_source = get(parser, "versionfile_source") - cfg.versionfile_build = get(parser, "versionfile_build") - cfg.tag_prefix = get(parser, "tag_prefix") - cfg.parentdir_prefix = get(parser, "parentdir_prefix") - cfg.verbose = get(parser, "verbose") - return cfg - - -class NotThisMethod(Exception): - pass - - -# these dictionaries contain VCS-specific tools -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - def decorate(f): - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen( - [c] + args, - cwd=cwd, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr else None), - ) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - return None - return stdout - - -LONG_VERSION_PY[ - "git" -] = """ -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. Generated by -# versioneer-0.15 (https://github.com/warner/python-versioneer) - -import errno -import os -import re -import subprocess -import sys - - -def get_keywords(): - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). 
- git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" - git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" - keywords = {"refnames": git_refnames, "full": git_full} - return keywords - - -class VersioneerConfig: - pass - - -def get_config(): - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "%(STYLE)s" - cfg.tag_prefix = "%(TAG_PREFIX)s" - cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" - cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - pass - - -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - def decorate(f): - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %%s" %% dispcmd) - print(e) - return None - else: - if verbose: - print("unable to find command, tried %%s" %% (commands,)) - return None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %%s (error)" %% dispcmd) - return None - return stdout - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - # Source tarballs conventionally unpack into a directory that includes - # both the project name and a version string. - dirname = os.path.basename(root) - if not dirname.startswith(parentdir_prefix): - if verbose: - print("guessing rootdir is '%%s', but '%%s' doesn't start with " - "prefix '%%s'" %% (root, dirname, parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None} - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - if not keywords: - raise NotThisMethod("no keywords at all, weird") - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". 
If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %%d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) - if verbose: - print("discarding '%%s', no digits" %% ",".join(refs-tags)) - if verbose: - print("likely tags: %%s" %% ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %%s" %% r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None - } - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags"} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - # this runs 'git' from the root of the source tree. This only gets called - # if the git-archive 'subst' keywords were *not* expanded, and - # _version.py hasn't already been rewritten with a short version string, - # meaning we're inside a checked out source tree. - - if not os.path.exists(os.path.join(root, ".git")): - if verbose: - print("no .git in %%s" %% root) - raise NotThisMethod("no .git directory") - - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - # if there is a tag, this yields TAG-NUM-gHEX[-dirty] - # if there are no tags, this yields HEX[-dirty] (no NUM) - describe_out = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long"], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%%s'" - %% describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%%s' doesn't start with prefix '%%s'" - print(fmt %% (full_tag, tag_prefix)) - pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" - %% (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - return pieces - - -def plus_or_dot(pieces): - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - # now build up version string, with post-release "local version - # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - # exceptions: - # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - # TAG[.post.devDISTANCE] . No -dirty - - # exceptions: - # 1: no tags. 0.post.devDISTANCE - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%%d" %% pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%%d" %% pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that - # .dev0 sorts backwards (a dirty tree will appear "older" than the - # corresponding clean one), but you shouldn't be releasing software with - # -dirty anyways. - - # exceptions: - # 1: no tags. 0.postDISTANCE[.dev0] - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%%s" %% pieces["short"] - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%%s" %% pieces["short"] - return rendered - - -def render_pep440_old(pieces): - # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. - - # exceptions: - # 1: no tags. 0.postDISTANCE[.dev0] - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty - # --always' - - # exceptions: - # 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty - # --always -long'. The distance/hash is unconditional. - - # exceptions: - # 1: no tags. HEX[-dirty] (note: no 'g' prefix) - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"]} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%%s'" %% style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None} - - -def get_versions(): - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. - for i in cfg.versionfile_source.split('/'): - root = os.path.dirname(root) - except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree"} - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version"} -""" - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. 
- keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - if not keywords: - raise NotThisMethod("no keywords at all, weird") - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r"\d", r)]) - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix) :] - if verbose: - print("picking %s" % r) - return { - "version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, - "error": None, - } - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return { - "version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, - "error": "no suitable tags", - } - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - # this runs 'git' from the root of the source tree. This only gets called - # if the git-archive 'subst' keywords were *not* expanded, and - # _version.py hasn't already been rewritten with a short version string, - # meaning we're inside a checked out source tree. 
- - if not os.path.exists(os.path.join(root, ".git")): - if verbose: - print("no .git in %s" % root) - raise NotThisMethod("no .git directory") - - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - # if there is a tag, this yields TAG-NUM-gHEX[-dirty] - # if there are no tags, this yields HEX[-dirty] (no NUM) - describe_out = run_command( - GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root - ) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[: git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = ( - "unable to parse git-describe output: '%s'" % describe_out - ) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( - full_tag, - tag_prefix, - ) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix) :] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out = run_command( - GITS, ["rev-list", "HEAD", "--count"], cwd=root - ) - pieces["distance"] = int(count_out) # total number of commits - - return pieces - - -def do_vcs_install(manifest_in, versionfile_source, ipy): - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - files = [manifest_in, versionfile_source] - if ipy: - files.append(ipy) - try: - me = __file__ - if me.endswith(".pyc") or me.endswith(".pyo"): - me = os.path.splitext(me)[0] + ".py" - versioneer_file = os.path.relpath(me) - except NameError: - versioneer_file = "versioneer.py" - files.append(versioneer_file) - present = False - try: - f = open(".gitattributes", "r") - for line in f.readlines(): - if line.strip().startswith(versionfile_source): - if "export-subst" in line.strip().split()[1:]: - present = True - f.close() - except EnvironmentError: - pass - if not present: - f = open(".gitattributes", "a+") - f.write("%s export-subst\n" % versionfile_source) - f.close() - files.append(".gitattributes") - run_command(GITS, ["add", "--"] + files) - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - # Source tarballs conventionally unpack into a directory that includes - # both the project name and a version string. 
- dirname = os.path.basename(root) - if not dirname.startswith(parentdir_prefix): - if verbose: - print( - "guessing rootdir is '%s', but '%s' doesn't start with " - "prefix '%s'" % (root, dirname, parentdir_prefix) - ) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - return { - "version": dirname[len(parentdir_prefix) :], - "full-revisionid": None, - "dirty": False, - "error": None, - } - - -SHORT_VERSION_PY = """ -# This file was generated by 'versioneer.py' (0.15) from -# revision-control system data, or from the parent directory name of an -# unpacked source archive. Distribution tarballs contain a pre-generated copy -# of this file. - -import json -import sys - -version_json = ''' -%s -''' # END VERSION_JSON - - -def get_versions(): - return json.loads(version_json) -""" - - -def versions_from_file(filename): - try: - with open(filename) as f: - contents = f.read() - except EnvironmentError: - raise NotThisMethod("unable to read _version.py") - mo = re.search( - r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, - re.M | re.S, - ) - if not mo: - raise NotThisMethod("no version_json in _version.py") - return json.loads(mo.group(1)) - - -def write_to_version_file(filename, versions): - os.unlink(filename) - contents = json.dumps( - versions, sort_keys=True, indent=1, separators=(",", ": ") - ) - with open(filename, "w") as f: - f.write(SHORT_VERSION_PY % contents) - - print("set %s to '%s'" % (filename, versions["version"])) - - -def plus_or_dot(pieces): - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - # now build up version string, with post-release "local version - # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - # exceptions: - # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - # TAG[.post.devDISTANCE] . No -dirty - - # exceptions: - # 1: no tags. 0.post.devDISTANCE - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that - # .dev0 sorts backwards (a dirty tree will appear "older" than the - # corresponding clean one), but you shouldn't be releasing software with - # -dirty anyways. - - # exceptions: - # 1: no tags. 0.postDISTANCE[.dev0] - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_old(pieces): - # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. 
- - # exceptions: - # 1: no tags. 0.postDISTANCE[.dev0] - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty - # --always' - - # exceptions: - # 1: no tags. HEX[-dirty] (note: no 'g' prefix) - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty - # --always -long'. The distance/hash is unconditional. - - # exceptions: - # 1: no tags. HEX[-dirty] (note: no 'g' prefix) - - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - if pieces["error"]: - return { - "version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - } - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return { - "version": rendered, - "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], - "error": None, - } - - -class VersioneerBadRootError(Exception): - pass - - -def get_versions(verbose=False): - # returns dict with two keys: 'version' and 'full' - - if "versioneer" in sys.modules: - # see the discussion in cmdclass.py:get_cmdclass() - del sys.modules["versioneer"] - - root = get_root() - cfg = get_config_from_root(root) - - assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" - handlers = HANDLERS.get(cfg.VCS) - assert handlers, "unrecognized VCS '%s'" % cfg.VCS - verbose = verbose or cfg.verbose - assert ( - cfg.versionfile_source is not None - ), "please set versioneer.versionfile_source" - assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" - - versionfile_abs = os.path.join(root, cfg.versionfile_source) - - # extract version from first of: _version.py, VCS command (e.g. 'git - # describe'), parentdir. This is meant to work for developers using a - # source checkout, for users of a tarball created by 'setup.py sdist', - # and for users of a tarball/zipball created by 'git archive' or github's - # download-from-tag feature or the equivalent in other VCSes. 
- - get_keywords_f = handlers.get("get_keywords") - from_keywords_f = handlers.get("keywords") - if get_keywords_f and from_keywords_f: - try: - keywords = get_keywords_f(versionfile_abs) - ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) - if verbose: - print("got version from expanded keyword %s" % ver) - return ver - except NotThisMethod: - pass - - try: - ver = versions_from_file(versionfile_abs) - if verbose: - print("got version from file %s %s" % (versionfile_abs, ver)) - return ver - except NotThisMethod: - pass - - from_vcs_f = handlers.get("pieces_from_vcs") - if from_vcs_f: - try: - pieces = from_vcs_f(cfg.tag_prefix, root, verbose) - ver = render(pieces, cfg.style) - if verbose: - print("got version from VCS %s" % ver) - return ver - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - if verbose: - print("got version from parentdir %s" % ver) - return ver - except NotThisMethod: - pass - - if verbose: - print("unable to compute version") - - return { - "version": "0+unknown", - "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", - } - - -def get_version(): - return get_versions()["version"] - - -def get_cmdclass(): - if "versioneer" in sys.modules: - del sys.modules["versioneer"] - # this fixes the "python setup.py develop" case (also 'install' and - # 'easy_install .'), in which subdependencies of the main project are - # built (using setup.py bdist_egg) in the same python process. Assume - # a main project A and a dependency B, which use different versions - # of Versioneer. A's setup.py imports A's Versioneer, leaving it in - # sys.modules by the time B's setup.py is executed, causing B to run - # with the wrong versioneer. Setuptools wraps the sub-dep builds in a - # sandbox that restores sys.modules to it's pre-build state, so the - # parent is protected against the child's "import versioneer". By - # removing ourselves from sys.modules here, before the child build - # happens, we protect the child from the parent's versioneer too. - # Also see https://github.com/warner/python-versioneer/issues/52 - - cmds = {} - - # we add "version" to both distutils and setuptools - from distutils.core import Command - - class cmd_version(Command): - description = "report generated version string" - user_options = [] - boolean_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - vers = get_versions(verbose=True) - print("Version: %s" % vers["version"]) - print(" full-revisionid: %s" % vers.get("full-revisionid")) - print(" dirty: %s" % vers.get("dirty")) - if vers["error"]: - print(" error: %s" % vers["error"]) - - cmds["version"] = cmd_version - - # we override "build_py" in both distutils and setuptools - # - # most invocation pathways end up running build_py: - # distutils/build -> build_py - # distutils/install -> distutils/build ->.. - # setuptools/bdist_wheel -> distutils/install ->.. - # setuptools/bdist_egg -> distutils/install_lib -> build_py - # setuptools/install -> bdist_egg ->.. - # setuptools/develop -> ? 
- - from distutils.command.build_py import build_py as _build_py - - class cmd_build_py(_build_py): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - _build_py.run(self) - # now locate _version.py in the new build/ directory and replace - # it with an updated value - if cfg.versionfile_build: - target_versionfile = os.path.join( - self.build_lib, cfg.versionfile_build - ) - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - cmds["build_py"] = cmd_build_py - - if "cx_Freeze" in sys.modules: # cx_freeze enabled? - from cx_Freeze.dist import build_exe as _build_exe - - class cmd_build_exe(_build_exe): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - _build_exe.run(self) - os.unlink(target_versionfile) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write( - LONG - % { - "DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - } - ) - - cmds["build_exe"] = cmd_build_exe - del cmds["build_py"] - - # we override different "sdist" commands for both environments - if "setuptools" in sys.modules: - from setuptools.command.sdist import sdist as _sdist - else: - from distutils.command.sdist import sdist as _sdist - - class cmd_sdist(_sdist): - def run(self): - versions = get_versions() - self._versioneer_generated_versions = versions - # unless we update this, the command will keep using the old - # version - self.distribution.metadata.version = versions["version"] - return _sdist.run(self) - - def make_release_tree(self, base_dir, files): - root = get_root() - cfg = get_config_from_root(root) - _sdist.make_release_tree(self, base_dir, files) - # now locate _version.py in the new base_dir directory - # (remembering that it may be a hardlink) and replace it with an - # updated value - target_versionfile = os.path.join(base_dir, cfg.versionfile_source) - print("UPDATING %s" % target_versionfile) - write_to_version_file( - target_versionfile, self._versioneer_generated_versions - ) - - cmds["sdist"] = cmd_sdist - - return cmds - - -CONFIG_ERROR = """ -setup.cfg is missing the necessary Versioneer configuration. You need -a section like: - - [versioneer] - VCS = git - style = pep440 - versionfile_source = src/myproject/_version.py - versionfile_build = myproject/_version.py - tag_prefix = "" - parentdir_prefix = myproject- - -You will also need to edit your setup.py to use the results: - - import versioneer - setup(version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), ...) - -Please read the docstring in ./versioneer.py for configuration instructions, -edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. -""" - -SAMPLE_CONFIG = """ -# See the docstring in versioneer.py for instructions. Note that you must -# re-run 'versioneer.py setup' after changing this section, and commit the -# resulting files. 
- -[versioneer] -#VCS = git -#style = pep440 -#versionfile_source = -#versionfile_build = -#tag_prefix = -#parentdir_prefix = - -""" - -INIT_PY_SNIPPET = """ -from ._version import get_versions -__version__ = get_versions()['version'] -del get_versions -""" - - -def do_setup(): - root = get_root() - try: - cfg = get_config_from_root(root) - except ( - EnvironmentError, - configparser.NoSectionError, - configparser.NoOptionError, - ) as e: - if isinstance(e, (EnvironmentError, configparser.NoSectionError)): - print( - "Adding sample versioneer config to setup.cfg", file=sys.stderr - ) - with open(os.path.join(root, "setup.cfg"), "a") as f: - f.write(SAMPLE_CONFIG) - print(CONFIG_ERROR, file=sys.stderr) - return 1 - - print(" creating %s" % cfg.versionfile_source) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write( - LONG - % { - "DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - } - ) - - ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") - if os.path.exists(ipy): - try: - with open(ipy, "r") as f: - old = f.read() - except EnvironmentError: - old = "" - if INIT_PY_SNIPPET not in old: - print(" appending to %s" % ipy) - with open(ipy, "a") as f: - f.write(INIT_PY_SNIPPET) - else: - print(" %s unmodified" % ipy) - else: - print(" %s doesn't exist, ok" % ipy) - ipy = None - - # Make sure both the top-level "versioneer.py" and versionfile_source - # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so - # they'll be copied into source distributions. Pip won't be able to - # install the package without this. - manifest_in = os.path.join(root, "MANIFEST.in") - simple_includes = set() - try: - with open(manifest_in, "r") as f: - for line in f: - if line.startswith("include "): - for include in line.split()[1:]: - simple_includes.add(include) - except EnvironmentError: - pass - # That doesn't cover everything MANIFEST.in can do - # (http://docs.python.org/2/distutils/sourcedist.html#commands), so - # it might give some false negatives. Appending redundant 'include' - # lines is safe, though. - if "versioneer.py" not in simple_includes: - print(" appending 'versioneer.py' to MANIFEST.in") - with open(manifest_in, "a") as f: - f.write("include versioneer.py\n") - else: - print(" 'versioneer.py' already in MANIFEST.in") - if cfg.versionfile_source not in simple_includes: - print( - " appending versionfile_source ('%s') to MANIFEST.in" - % cfg.versionfile_source - ) - with open(manifest_in, "a") as f: - f.write("include %s\n" % cfg.versionfile_source) - else: - print(" versionfile_source already in MANIFEST.in") - - # Make VCS-specific changes. For git, this means creating/changing - # .gitattributes to mark _version.py for export-time keyword - # substitution. - do_vcs_install(manifest_in, cfg.versionfile_source, ipy) - return 0 - - -def scan_setup_py(): - found = set() - setters = False - errors = 0 - with open("setup.py", "r") as f: - for line in f.readlines(): - if "import versioneer" in line: - found.add("import") - if "versioneer.get_cmdclass()" in line: - found.add("cmdclass") - if "versioneer.get_version()" in line: - found.add("get_version") - if "versioneer.VCS" in line: - setters = True - if "versioneer.versionfile_source" in line: - setters = True - if len(found) != 3: - print("") - print("Your setup.py appears to be missing some important items") - print("(but I might be wrong). 
Please make sure it has something") - print("roughly like the following:") - print("") - print(" import versioneer") - print(" setup( version=versioneer.get_version(),") - print(" cmdclass=versioneer.get_cmdclass(), ...)") - print("") - errors += 1 - if setters: - print("You should remove lines like 'versioneer.VCS = ' and") - print("'versioneer.versionfile_source = ' . This configuration") - print("now lives in setup.cfg, and should be removed from setup.py") - print("") - errors += 1 - return errors - - -if __name__ == "__main__": - cmd = sys.argv[1] - if cmd == "setup": - errors = do_setup() - errors += scan_setup_py() - if errors: - sys.exit(1) From f65dc5a326940a5c8708b31c9166ecae3edc2db7 Mon Sep 17 00:00:00 2001 From: Stefan Jansen Date: Thu, 12 Jan 2023 11:30:43 -0500 Subject: [PATCH 2/4] do not track _version.py --- .gitignore | 1 + src/pyfolio/_version.py | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) delete mode 100644 src/pyfolio/_version.py diff --git a/.gitignore b/.gitignore index ae891fd5..5a6c249f 100644 --- a/.gitignore +++ b/.gitignore @@ -66,3 +66,4 @@ target/ .ipynb_checkpoints/ .idea +src/pyfolio/_version.py diff --git a/src/pyfolio/_version.py b/src/pyfolio/_version.py deleted file mode 100644 index 0beaed4b..00000000 --- a/src/pyfolio/_version.py +++ /dev/null @@ -1,4 +0,0 @@ -# file generated by setuptools_scm -# don't change, don't track in version control -__version__ = version = "0.9.5.dev4+dirty" -__version_tuple__ = version_tuple = (0, 9, 5, "dev4", "dirty") From f0ffc856d0756fb6e1804ae50a4af577d4cf3bcc Mon Sep 17 00:00:00 2001 From: Stefan Jansen Date: Thu, 12 Jan 2023 11:34:42 -0500 Subject: [PATCH 3/4] no reruns, no py37 --- .github/workflows/unit_tests.yml | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 54fa92b5..9fb5ff3f 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -16,7 +16,7 @@ jobs: fail-fast: false matrix: os: [ ubuntu-latest , windows-latest, macos-latest ] - python-version: [ 3.7, 3.8, 3.9, '3.10', '3.11'] + python-version: [ 3.8, 3.9, '3.10', '3.11'] steps: - name: Checkout pyfolio uses: actions/checkout@v2 diff --git a/pyproject.toml b/pyproject.toml index bb18bdad..0773b28b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -159,5 +159,5 @@ deps = pandas15: pandas>=1.5.0,<1.6 commands = - pytest -n 4 --reruns 5 --cov={toxinidir}/src --cov-report term --cov-report=xml --cov-report=html:htmlcov {toxinidir}/tests + pytest -n 2 --cov={toxinidir}/src --cov-report term --cov-report=xml --cov-report=html:htmlcov {toxinidir}/tests """ From 3a235296afdfd9f18ba3b89eb117071084e55b07 Mon Sep 17 00:00:00 2001 From: Stefan Jansen Date: Thu, 12 Jan 2023 12:16:23 -0500 Subject: [PATCH 4/4] remove 3.7 support --- .github/workflows/build_wheels.yml | 25 +++++++++++++------------ pyproject.toml | 4 ++-- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml index 7cc81121..cd5b9c57 100644 --- a/.github/workflows/build_wheels.yml +++ b/.github/workflows/build_wheels.yml @@ -3,13 +3,11 @@ name: PyPI on: workflow_dispatch: inputs: - target: - description: 'Package Index' + publish_to_pypi: + description: 'Publish to PyPI?' 
         required: true
-        default: 'TESTPYPI'
-  release:
-    types:
-      - published
+        type: boolean
+        default: false
 
 jobs:
   dist:
     strategy:
       fail-fast: false
       matrix:
         os: [ ubuntu-latest ]
-        python-version: [ 3.8 ]
+        python-version: [ '3.10' ]
 
     steps:
       - name: Checkout pyfolio
@@ -45,21 +43,24 @@ jobs:
   upload_pypi:
     needs: [ dist ]
     runs-on: ubuntu-latest
+    if: startsWith(github.ref, 'refs/tags')
     steps:
-      - uses: actions/download-artifact@v2
+      - uses: actions/download-artifact@v3
        with:
          name: artifact
          path: dist
+
       - name: publish to testpypi
-        uses: pypa/gh-action-pypi-publish@master
-        if: ${{ github.event.inputs.target }} == 'TESTPYPI'
+        uses: pypa/gh-action-pypi-publish@release/v1
+        if: inputs.publish_to_pypi == false
        with:
          user: __token__
          password: ${{ secrets.TESTPYPI_TOKEN }}
          repository_url: https://test.pypi.org/legacy/
+
       - name: publish to pypi
-        uses: pypa/gh-action-pypi-publish@master
-        if: ${{ github.event.inputs.target }} == 'PYPI' || (github.event_name == 'release' && github.event.action == 'published')
+        uses: pypa/gh-action-pypi-publish@release/v1
+        if: inputs.publish_to_pypi == true
        with:
          user: __token__
          password: ${{ secrets.PYPI_TOKEN }}
diff --git a/pyproject.toml b/pyproject.toml
index 0773b28b..91c66878 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -52,7 +52,7 @@ requires = [
     'setuptools>=54.0.0',
     "setuptools_scm[toml]>=6.2",
     'wheel>=0.31.0',
-    'oldest-supported-numpy; python_version>="3.7"',
+    'oldest-supported-numpy; python_version>="3.8"',
 ]
 build-backend = 'setuptools.build_meta'
 
@@ -106,7 +106,7 @@ addopts = '-v'
 
 [tool.cibuildwheel]
 test-extras = "test"
-test-command = "pytest -n 2 --reruns 5 {package}/tests"
+test-command = "pytest -n 2 {package}/tests"
 build-verbosity = 3
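
Note on the new versioning scheme: with versioneer removed, the version string is generated by setuptools_scm at build time (see the "setuptools_scm[toml]>=6.2" build requirement above, and the generated src/pyfolio/_version.py that PATCH 2/4 deletes and gitignores). A minimal sketch of how downstream code can read the resulting version at runtime, assuming the distribution is installed under the name "pyfolio"; importlib.metadata is in the standard library on every Python version this series still supports (3.8+):

    # Illustrative sanity check only, not part of the patch series.
    from importlib.metadata import PackageNotFoundError, version

    try:
        # Resolves the installed distribution's version, e.g. a string like
        # the "0.9.5.dev4+dirty" seen in the deleted _version.py, derived by
        # setuptools_scm from git tags at build time.
        print(version("pyfolio"))
    except PackageNotFoundError:
        # Running from an uninstalled source tree: no distribution metadata.
        print("pyfolio is not installed")

Because _version.py is now untracked and regenerated on every build, checks like this go through the installed metadata rather than importing the file directly.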