From 8301f543fc06e65bf9c27024b0faf0201a70cf3d Mon Sep 17 00:00:00 2001
From: Matt Davis
Date: Mon, 1 Aug 2022 21:41:43 -0400
Subject: [PATCH 001/200] Vendor new safety (#5217)

* Vendor safety==2.1.1 cleanly with ruamel.

* Apply more minimal patch to safety.
---
 pipenv/patched/patched.txt                    |    2 +-
 pipenv/patched/safety/VERSION                 |    1 +
 pipenv/patched/safety/__init__.py             |    8 +-
 pipenv/patched/safety/__main__.py             |   42 +-
 pipenv/patched/safety/cli.py                  |  451 ++-
 pipenv/patched/safety/constants.py            |   28 +-
 pipenv/patched/safety/errors.py               |  102 +-
 pipenv/patched/safety/formatter.py            |  366 +--
 pipenv/patched/safety/formatters/__init__.py  |    0
 pipenv/patched/safety/formatters/bare.py      |   38 +
 pipenv/patched/safety/formatters/json.py      |   75 +
 pipenv/patched/safety/formatters/screen.py    |  143 +
 pipenv/patched/safety/formatters/text.py      |  134 +
 pipenv/patched/safety/models.py               |  110 +
 pipenv/patched/safety/output_utils.py         |  683 +++++
 .../patched/safety/safety-policy-template.yml |   14 +
 pipenv/patched/safety/safety.py               |  518 +++-
 pipenv/patched/safety/util.py                 |  568 +++-
 .../ruamel.yaml-0.17.21-py3.9-nspkg.pth       |    1 +
 ...uamel.yaml-0.17.21-py3.9-nspkg.pth.LICENSE |   21 +
 pipenv/vendor/ruamel.yaml.LICENSE             |   21 +
 pipenv/vendor/ruamel/yaml/__init__.py         |   57 +
 pipenv/vendor/ruamel/yaml/anchor.py           |   20 +
 pipenv/vendor/ruamel/yaml/comments.py         | 1267 +++++++++
 pipenv/vendor/ruamel/yaml/compat.py           |  268 ++
 pipenv/vendor/ruamel/yaml/composer.py         |  243 ++
 pipenv/vendor/ruamel/yaml/configobjwalker.py  |   14 +
 pipenv/vendor/ruamel/yaml/constructor.py      | 1845 +++++++++++++
 pipenv/vendor/ruamel/yaml/cyaml.py            |  183 ++
 pipenv/vendor/ruamel/yaml/dumper.py           |  219 ++
 pipenv/vendor/ruamel/yaml/emitter.py          | 1772 ++++++++++++
 pipenv/vendor/ruamel/yaml/error.py            |  332 +++
 pipenv/vendor/ruamel/yaml/events.py           |  196 ++
 pipenv/vendor/ruamel/yaml/loader.py           |   75 +
 pipenv/vendor/ruamel/yaml/main.py             | 1667 +++++++++++
 pipenv/vendor/ruamel/yaml/nodes.py            |  135 +
 pipenv/vendor/ruamel/yaml/parser.py           |  884 ++++++
 pipenv/vendor/ruamel/yaml/py.typed            |    0
 pipenv/vendor/ruamel/yaml/reader.py           |  302 ++
 pipenv/vendor/ruamel/yaml/representer.py      | 1156 ++++++++
 pipenv/vendor/ruamel/yaml/resolver.py         |  405 +++
 pipenv/vendor/ruamel/yaml/scalarbool.py       |   47 +
 pipenv/vendor/ruamel/yaml/scalarfloat.py      |  124 +
 pipenv/vendor/ruamel/yaml/scalarint.py        |  127 +
 pipenv/vendor/ruamel/yaml/scalarstring.py     |  152 +
 pipenv/vendor/ruamel/yaml/scanner.py          | 2444 +++++++++++++++++
 pipenv/vendor/ruamel/yaml/serializer.py       |  241 ++
 pipenv/vendor/ruamel/yaml/timestamp.py        |   61 +
 pipenv/vendor/ruamel/yaml/tokens.py           |  404 +++
 pipenv/vendor/ruamel/yaml/util.py             |  256 ++
 pipenv/vendor/vendor.txt                      |    1 +
 .../patches/patched/safety-main.patch         |   47 +-
 52 files changed, 17610 insertions(+), 660 deletions(-)
 create mode 100644 pipenv/patched/safety/VERSION
 create mode 100644 pipenv/patched/safety/formatters/__init__.py
 create mode 100644 pipenv/patched/safety/formatters/bare.py
 create mode 100644 pipenv/patched/safety/formatters/json.py
 create mode 100644 pipenv/patched/safety/formatters/screen.py
 create mode 100644 pipenv/patched/safety/formatters/text.py
 create mode 100644 pipenv/patched/safety/models.py
 create mode 100644 pipenv/patched/safety/output_utils.py
 create mode 100644 pipenv/patched/safety/safety-policy-template.yml
 create mode 100644 pipenv/vendor/ruamel.yaml-0.17.21-py3.9-nspkg.pth
 create mode 100644 pipenv/vendor/ruamel.yaml-0.17.21-py3.9-nspkg.pth.LICENSE
 create mode 100644 pipenv/vendor/ruamel.yaml.LICENSE
 create mode 100644 pipenv/vendor/ruamel/yaml/__init__.py
 create mode 100644 pipenv/vendor/ruamel/yaml/anchor.py
 create mode 100644 pipenv/vendor/ruamel/yaml/comments.py
 create mode 100644 pipenv/vendor/ruamel/yaml/compat.py
 create mode 100644 pipenv/vendor/ruamel/yaml/composer.py
 create mode 100644 pipenv/vendor/ruamel/yaml/configobjwalker.py
 create mode 100644 pipenv/vendor/ruamel/yaml/constructor.py
 create mode 100644 pipenv/vendor/ruamel/yaml/cyaml.py
 create mode 100644 pipenv/vendor/ruamel/yaml/dumper.py
 create mode 100644 pipenv/vendor/ruamel/yaml/emitter.py
 create mode 100644 pipenv/vendor/ruamel/yaml/error.py
 create mode 100644 pipenv/vendor/ruamel/yaml/events.py
 create mode 100644 pipenv/vendor/ruamel/yaml/loader.py
 create mode 100644 pipenv/vendor/ruamel/yaml/main.py
 create mode 100644 pipenv/vendor/ruamel/yaml/nodes.py
 create mode 100644 pipenv/vendor/ruamel/yaml/parser.py
 create mode 100644 pipenv/vendor/ruamel/yaml/py.typed
 create mode 100644 pipenv/vendor/ruamel/yaml/reader.py
 create mode 100644 pipenv/vendor/ruamel/yaml/representer.py
 create mode 100644 pipenv/vendor/ruamel/yaml/resolver.py
 create mode 100644 pipenv/vendor/ruamel/yaml/scalarbool.py
 create mode 100644 pipenv/vendor/ruamel/yaml/scalarfloat.py
 create mode 100644 pipenv/vendor/ruamel/yaml/scalarint.py
 create mode 100644 pipenv/vendor/ruamel/yaml/scalarstring.py
 create mode 100644 pipenv/vendor/ruamel/yaml/scanner.py
 create mode 100644 pipenv/vendor/ruamel/yaml/serializer.py
 create mode 100644 pipenv/vendor/ruamel/yaml/timestamp.py
 create mode 100644 pipenv/vendor/ruamel/yaml/tokens.py
 create mode 100644 pipenv/vendor/ruamel/yaml/util.py

diff --git a/pipenv/patched/patched.txt b/pipenv/patched/patched.txt
index 1163d1ee4b..1eab502e59 100644
--- a/pipenv/patched/patched.txt
+++ b/pipenv/patched/patched.txt
@@ -1,3 +1,3 @@
 pip==22.2.1
 pipfile==0.0.2
-safety==1.10.3
+safety==2.1.1
diff --git a/pipenv/patched/safety/VERSION b/pipenv/patched/safety/VERSION
new file mode 100644
index 0000000000..3e3c2f1e5e
--- /dev/null
+++ b/pipenv/patched/safety/VERSION
@@ -0,0 +1 @@
+2.1.1
diff --git a/pipenv/patched/safety/__init__.py b/pipenv/patched/safety/__init__.py
index 1667d43df6..da2f5d50d9 100644
--- a/pipenv/patched/safety/__init__.py
+++ b/pipenv/patched/safety/__init__.py
@@ -2,4 +2,10 @@
 __author__ = """pyup.io"""
 __email__ = 'support@pyup.io'
-__version__ = '1.10.3'
+
+import os
+
+ROOT = os.path.dirname(os.path.abspath(__file__))
+
+with open(os.path.join(ROOT, 'VERSION')) as version_file:
+    VERSION = version_file.read().strip()
diff --git a/pipenv/patched/safety/__main__.py b/pipenv/patched/safety/__main__.py
index cbd1a075da..be36e88b5e 100644
--- a/pipenv/patched/safety/__main__.py
+++ b/pipenv/patched/safety/__main__.py
@@ -1,48 +1,8 @@
 """Allow safety to be executable through `python -m safety`."""
 from __future__ import absolute_import
-import os
-import sys
-import sysconfig
-
-
-PATCHED_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-PIPENV_DIR = os.path.dirname(PATCHED_DIR)
-VENDORED_DIR = os.path.join("PIPENV_DIR", "vendor")
-
-
-def get_site_packages():
-    prefixes = {sys.prefix, sysconfig.get_config_var('prefix')}
-    try:
-        prefixes.add(sys.real_prefix)
-    except AttributeError:
-        pass
-    form = sysconfig.get_path('purelib', expand=False)
-    py_version_short = '{0[0]}.{0[1]}'.format(sys.version_info)
-    return {
-        form.format(base=prefix, py_version_short=py_version_short)
-        for prefix in prefixes
-    }
-
-
-def insert_before_site_packages(*paths):
-    site_packages = get_site_packages()
-    index = None
-    for i, path in enumerate(sys.path):
-        if path in site_packages:
-            index = i
-            break
-    if index is None:
-        sys.path += list(paths)
-    else:
-        sys.path = sys.path[:index] + list(paths) + sys.path[index:]
-
-
-def insert_pipenv_dirs():
-    insert_before_site_packages(os.path.dirname(PIPENV_DIR), PATCHED_DIR, VENDORED_DIR)
+from pipenv.patched.safety.cli import cli
 
 
 if __name__ == "__main__":  # pragma: no cover
-    insert_pipenv_dirs()
-    from pipenv.patched.safety.cli import cli
     cli(prog_name="safety")
diff --git a/pipenv/patched/safety/cli.py b/pipenv/patched/safety/cli.py
index 0d092fb2f9..4993b72e45 100644
--- a/pipenv/patched/safety/cli.py
+++ b/pipenv/patched/safety/cli.py
@@ -1,23 +1,49 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import
+
+import json
+import logging
+import os
 import sys
+import tempfile
+
 import pipenv.vendor.click as click
-from pipenv.patched.safety import __version__
+
 from pipenv.patched.safety import safety
-from pipenv.patched.safety.formatter import report, license_report
-import itertools
-from pipenv.patched.safety.util import read_requirements, read_vulnerabilities, get_proxy_dict, get_packages_licenses
-from pipenv.patched.safety.errors import DatabaseFetchError, DatabaseFileNotFoundError, InvalidKeyError, TooManyRequestsError
+from pipenv.patched.safety.constants import EXIT_CODE_VULNERABILITIES_FOUND, EXIT_CODE_OK, EXIT_CODE_FAILURE
+from pipenv.patched.safety.errors import SafetyException, SafetyError
+from pipenv.patched.safety.formatter import SafetyFormatter
+from pipenv.patched.safety.output_utils import should_add_nl
+from pipenv.patched.safety.safety import get_packages, read_vulnerabilities, fetch_policy, post_results
+from pipenv.patched.safety.util import get_proxy_dict, get_packages_licenses, output_exception, \
+    MutuallyExclusiveOption, DependentOption, transform_ignore, SafetyPolicyFile, active_color_if_needed, \
+    get_processed_options, get_safety_version, json_alias, bare_alias, SafetyContext
+
+LOG = logging.getLogger(__name__)
 
-try:
-    from json.decoder import JSONDecodeError
-except ImportError:
-    JSONDecodeError = ValueError
 
 @click.group()
-@click.version_option(version=__version__)
-def cli():
-    pass
+@click.option('--debug/--no-debug', default=False)
+@click.option('--telemetry/--disable-telemetry', default=True, hidden=True)
+@click.option('--disable-optional-telemetry-data', default=False, cls=MutuallyExclusiveOption,
+              mutually_exclusive=["telemetry", "disable-telemetry"], is_flag=True, show_default=True)
+@click.version_option(version=get_safety_version())
+@click.pass_context
+def cli(ctx, debug, telemetry, disable_optional_telemetry_data):
+    """
+    Safety checks Python dependencies for known security vulnerabilities and suggests the proper
+    remediations for vulnerabilities detected. Safety can be run on developer machines, in CI/CD pipelines and
+    on production systems.
+    """
+    SafetyContext().safety_source = 'cli'
+    ctx.telemetry = telemetry and not disable_optional_telemetry_data
+    level = logging.CRITICAL
+    if debug:
+        level = logging.DEBUG
+
+    logging.basicConfig(format='%(asctime)s %(name)s => %(message)s', level=level)
+
+    LOG.info(f'Telemetry enabled: {ctx.telemetry}')
 
 
 @cli.command()
@@ -26,100 +52,194 @@ def cli():
               "environment variable. Default: empty")
 @click.option("--db", default="",
               help="Path to a local vulnerability database. Default: empty")
-@click.option("--json/--no-json", default=False,
-              help="Output vulnerabilities in JSON format. Default: --no-json")
-@click.option("--full-report/--short-report", default=False,
-              help='Full reports include a security advisory (if available). Default: '
-                   '--short-report')
-@click.option("--bare/--not-bare", default=False,
-              help='Output vulnerable packages only. '
-                   'Useful in combination with other tools. '
-                   'Default: --not-bare')
-@click.option("--cache/--no-cache", default=False,
-              help="Cache requests to the vulnerability database locally. Default: --no-cache")
-@click.option("--stdin/--no-stdin", default=False,
-              help="Read input from stdin. Default: --no-stdin")
-@click.option("files", "--file", "-r", multiple=True, type=click.File(),
+@click.option("--full-report/--short-report", default=False, cls=MutuallyExclusiveOption,
+              mutually_exclusive=["output", "json", "bare"],
+              with_values={"output": ['json', 'bare'], "json": [True, False], "bare": [True, False]},
+              help='Full reports include a security advisory (if available). Default: --short-report')
+@click.option("--cache", is_flag=False, flag_value=60, default=0,
+              help="Cache requests to the vulnerability database locally. Default: 0 seconds",
+              hidden=True)
+@click.option("--stdin", default=False, cls=MutuallyExclusiveOption, mutually_exclusive=["files"],
+              help="Read input from stdin.", is_flag=True, show_default=True)
+@click.option("files", "--file", "-r", multiple=True, type=click.File(), cls=MutuallyExclusiveOption,
+              mutually_exclusive=["stdin"],
               help="Read input from one (or multiple) requirement files. Default: empty")
-@click.option("ignore", "--ignore", "-i", multiple=True, type=str, default=[],
+@click.option("--ignore", "-i", multiple=True, type=str, default=[], callback=transform_ignore,
              help="Ignore one (or multiple) vulnerabilities by ID. Default: empty")
-@click.option("--output", "-o", default="",
-              help="Path to where output file will be placed. Default: empty")
-@click.option("proxyhost", "--proxy-host", "-ph", multiple=False, type=str, default=None,
+@click.option('--json', default=False, cls=MutuallyExclusiveOption, mutually_exclusive=["output", "bare"],
+              with_values={"output": ['screen', 'text', 'bare', 'json'], "bare": [True, False]}, callback=json_alias,
+              hidden=True, is_flag=True, show_default=True)
+@click.option('--bare', default=False, cls=MutuallyExclusiveOption, mutually_exclusive=["output", "json"],
+              with_values={"output": ['screen', 'text', 'bare', 'json'], "json": [True, False]}, callback=bare_alias,
+              hidden=True, is_flag=True, show_default=True)
+@click.option('--output', "-o", type=click.Choice(['screen', 'text', 'json', 'bare'], case_sensitive=False),
+              default='screen', callback=active_color_if_needed, envvar='SAFETY_OUTPUT')
+@click.option("--proxy-protocol", "-pr", type=click.Choice(['http', 'https']), default='https', cls=DependentOption,
+              required_options=['proxy_host'],
+              help="Proxy protocol (https or http) --proxy-protocol")
+@click.option("--proxy-host", "-ph", multiple=False, type=str, default=None,
               help="Proxy host IP or DNS --proxy-host")
-@click.option("proxyport", "--proxy-port", "-pp", multiple=False, type=int, default=80,
+@click.option("--proxy-port", "-pp", multiple=False, type=int, default=80, cls=DependentOption,
+              required_options=['proxy_host'],
              help="Proxy port number --proxy-port")
-@click.option("proxyprotocol", "--proxy-protocol", "-pr", multiple=False, type=str, default='http',
-              help="Proxy protocol (https or http) --proxy-protocol")
-def check(key, db, json, full_report, bare, stdin, files, cache, ignore, output, proxyprotocol, proxyhost, proxyport):
-    if files and stdin:
-        click.secho("Can't read from --stdin and --file at the same time, exiting", fg="red", file=sys.stderr)
-        sys.exit(-1)
-
-    if files:
-        packages = list(itertools.chain.from_iterable(read_requirements(f, resolve=True) for f in files))
-    elif stdin:
-        packages = list(read_requirements(sys.stdin))
-    else:
-        import pkg_resources
-        packages = [
-            d for d in pkg_resources.working_set
-            if d.key not in {"python", "wsgiref", "argparse"}
-        ]
-    proxy_dictionary = get_proxy_dict(proxyprotocol, proxyhost, proxyport)
+@click.option("--exit-code/--continue-on-error", default=True,
+              help="Output standard exit codes. Default: --exit-code")
+@click.option("--policy-file", type=SafetyPolicyFile(), default='.safety-policy.yml',
+              help="Define the policy file to be used")
+@click.option("--audit-and-monitor/--disable-audit-and-monitor", default=True,
+              help="Send results back to pyup.io for viewing on your dashboard. Requires an API key.")
+@click.option("--project", default=None,
+              help="Project to associate this scan with on pyup.io. Defaults to a canonicalized github style name if available, otherwise unknown")
+
+@click.option("--save-json", default="", help="Path to where output file will be placed, if the path is a directory, "
+                                              "Safety will use safety-report.json as filename. Default: empty")
+@click.pass_context
+def check(ctx, key, db, full_report, stdin, files, cache, ignore, output, json, bare, proxy_protocol, proxy_host, proxy_port,
+          exit_code, policy_file, save_json, audit_and_monitor, project):
+    """
+    Find vulnerabilities in Python dependencies at the target provided.
+ + """ + LOG.info('Running check command') + try: - vulns = safety.check(packages=packages, key=key, db_mirror=db, cached=cache, ignore_ids=ignore, proxy=proxy_dictionary) - output_report = report(vulns=vulns, - full=full_report, - json_report=json, - bare_report=bare, - checked_packages=len(packages), - db=db, - key=key) - - if output: - with open(output, 'w+') as output_file: - output_file.write(output_report) + packages = get_packages(files, stdin) + proxy_dictionary = get_proxy_dict(proxy_protocol, proxy_host, proxy_port) + + announcements = [] + if not db: + LOG.info('Not local DB used, Getting announcements') + announcements = safety.get_announcements(key=key, proxy=proxy_dictionary, telemetry=ctx.parent.telemetry) + + if key: + server_policies = fetch_policy(key=key, proxy=proxy_dictionary) + server_audit_and_monitor = server_policies["audit_and_monitor"] + server_safety_policy = server_policies["safety_policy"] else: - click.secho(output_report, nl=False if bare and not vulns else True) - sys.exit(-1 if vulns else 0) - except InvalidKeyError: - click.secho("Your API Key '{key}' is invalid. See {link}".format( - key=key, link='https://goo.gl/O7Y1rS'), - fg="red", - file=sys.stderr) - sys.exit(-1) - except DatabaseFileNotFoundError: - click.secho("Unable to load vulnerability database from {db}".format(db=db), fg="red", file=sys.stderr) - sys.exit(-1) - except DatabaseFetchError: - click.secho("Unable to load vulnerability database", fg="red", file=sys.stderr) - sys.exit(-1) + server_audit_and_monitor = False + server_safety_policy = "" + + if server_safety_policy and policy_file: + click.secho( + "Warning: both a local policy file '{policy_filename}' and a server sent policy are present. " + "Continuing with the local policy file.".format(policy_filename=policy_file['filename']), + fg="yellow", + file=sys.stderr + ) + elif server_safety_policy: + with tempfile.NamedTemporaryFile(prefix='server-safety-policy-') as tmp: + tmp.write(server_safety_policy.encode('utf-8')) + tmp.seek(0) + + policy_file = SafetyPolicyFile().convert(tmp.name, param=None, ctx=None) + LOG.info('Using server side policy file') + + ignore_severity_rules = None + ignore, ignore_severity_rules, exit_code = get_processed_options(policy_file, ignore, + ignore_severity_rules, exit_code) + + is_env_scan = not stdin and not files + params = {'stdin': stdin, 'files': files, 'policy_file': policy_file, 'continue_on_error': not exit_code, + 'ignore_severity_rules': ignore_severity_rules, 'project': project, 'audit_and_monitor': server_audit_and_monitor and audit_and_monitor} + LOG.info('Calling the check function') + vulns, db_full = safety.check(packages=packages, key=key, db_mirror=db, cached=cache, ignore_vulns=ignore, + ignore_severity_rules=ignore_severity_rules, proxy=proxy_dictionary, + include_ignored=True, is_env_scan=is_env_scan, telemetry=ctx.parent.telemetry, + params=params) + LOG.debug('Vulnerabilities returned: %s', vulns) + LOG.debug('full database returned is None: %s', db_full is None) + + LOG.info('Safety is going to calculate remediations') + remediations = safety.calculate_remediations(vulns, db_full) + + json_report = None + if save_json or (server_audit_and_monitor and audit_and_monitor): + default_name = 'safety-report.json' + json_report = SafetyFormatter(output='json').render_vulnerabilities(announcements, vulns, remediations, + full_report, packages) + + if server_audit_and_monitor and audit_and_monitor: + policy_contents = '' + if policy_file: + policy_contents = policy_file.get('raw', '') + 
+ r = post_results(key=key, proxy=proxy_dictionary, safety_json=json_report, policy_file=policy_contents) + SafetyContext().params['audit_and_monitor_url'] = r.get('url') + + if save_json: + if os.path.isdir(save_json): + save_json = os.path.join(save_json, default_name) + + with open(save_json, 'w+') as output_json_file: + output_json_file.write(json_report) + + LOG.info('Safety is going to render the vulnerabilities report using %s output', output) + if json_report and output == 'json': + output_report = json_report + else: + output_report = SafetyFormatter(output=output).render_vulnerabilities(announcements, vulns, remediations, + full_report, packages) + + # Announcements are send to stderr if not terminal, it doesn't depend on "exit_code" value + if announcements and (not sys.stdout.isatty() and os.environ.get("SAFETY_OS_DESCRIPTION", None) != 'run'): + LOG.info('sys.stdout is not a tty, announcements are going to be send to stderr') + click.secho(SafetyFormatter(output='text').render_announcements(announcements), fg="red", file=sys.stderr) + + found_vulns = list(filter(lambda v: not v.ignored, vulns)) + LOG.info('Vulnerabilities found (Not ignored): %s', len(found_vulns)) + LOG.info('All vulnerabilities found (ignored and Not ignored): %s', len(vulns)) + + click.secho(output_report, nl=should_add_nl(output, found_vulns), file=sys.stdout) + + if exit_code and found_vulns: + LOG.info('Exiting with default code for vulnerabilities found') + sys.exit(EXIT_CODE_VULNERABILITIES_FOUND) + + sys.exit(EXIT_CODE_OK) + + except SafetyError as e: + LOG.exception('Expected SafetyError happened: %s', e) + output_exception(e, exit_code_output=exit_code) + except Exception as e: + LOG.exception('Unexpected Exception happened: %s', e) + exception = e if isinstance(e, SafetyException) else SafetyException(info=e) + output_exception(exception, exit_code_output=exit_code) @cli.command() -@click.option("--full-report/--short-report", default=False, +@click.option("--full-report/--short-report", default=False, cls=MutuallyExclusiveOption, mutually_exclusive=["output"], with_values={"output": ['json', 'bare']}, help='Full reports include a security advisory (if available). Default: ' '--short-report') -@click.option("--bare/--not-bare", default=False, - help='Output vulnerable packages only. Useful in combination with other tools. ' - 'Default: --not-bare') +@click.option('--output', "-o", type=click.Choice(['screen', 'text', 'json', 'bare'], case_sensitive=False), + default='screen', callback=active_color_if_needed) @click.option("file", "--file", "-f", type=click.File(), required=True, help="Read input from an insecure report file. Default: empty") -def review(full_report, bare, file): - if full_report and bare: - click.secho("Can't choose both --bare and --full-report/--short-report", fg="red") - sys.exit(-1) +@click.pass_context +def review(ctx, full_report, output, file): + """ + Show an output from a previous exported JSON report. 
+ """ + LOG.info('Running check command') + announcements = safety.get_announcements(key=None, proxy=None, telemetry=ctx.parent.telemetry) + report = {} try: - input_vulns = read_vulnerabilities(file) - except JSONDecodeError: - click.secho("Not a valid JSON file", fg="red") - sys.exit(-1) + report = read_vulnerabilities(file) + except SafetyError as e: + LOG.exception('Expected SafetyError happened: %s', e) + output_exception(e, exit_code_output=True) + except Exception as e: + LOG.exception('Unexpected Exception happened: %s', e) + exception = e if isinstance(e, SafetyException) else SafetyException(info=e) + output_exception(exception, exit_code_output=True) - vulns = safety.review(input_vulns) - output_report = report(vulns=vulns, full=full_report, bare_report=bare) - click.secho(output_report, nl=False if bare and not vulns else True) + params = {'file': file} + vulns, remediations, packages = safety.review(report, params=params) + + output_report = SafetyFormatter(output=output).render_vulnerabilities(announcements, vulns, remediations, + full_report, packages) + + found_vulns = list(filter(lambda v: not v.ignored, vulns)) + click.secho(output_report, nl=should_add_nl(output, found_vulns), file=sys.stdout) + sys.exit(EXIT_CODE_OK) @cli.command() @@ -128,15 +248,11 @@ def review(full_report, bare, file): "environment variable. Default: empty") @click.option("--db", default="", help="Path to a local license database. Default: empty") -@click.option("--json/--no-json", default=False, - help="Output packages licenses in JSON format. Default: --no-json") -@click.option("--bare/--not-bare", default=False, - help='Output packages licenses names only. ' - 'Useful in combination with other tools. ' - 'Default: --not-bare') -@click.option("--cache/--no-cache", default=True, +@click.option('--output', "-o", type=click.Choice(['screen', 'text', 'json', 'bare'], case_sensitive=False), + default='screen') +@click.option("--cache", default=0, help='Whether license database file should be cached.' - 'Default: --cache') + 'Default: 0 seconds') @click.option("files", "--file", "-r", multiple=True, type=click.File(), help="Read input from one (or multiple) requirement files. Default: empty") @click.option("proxyhost", "--proxy-host", "-ph", multiple=False, type=str, default=None, @@ -145,50 +261,109 @@ def review(full_report, bare, file): help="Proxy port number --proxy-port") @click.option("proxyprotocol", "--proxy-protocol", "-pr", multiple=False, type=str, default='http', help="Proxy protocol (https or http) --proxy-protocol") -def license(key, db, json, bare, cache, files, proxyprotocol, proxyhost, proxyport): - - if files: - packages = list(itertools.chain.from_iterable(read_requirements(f, resolve=True) for f in files)) - else: - import pkg_resources - packages = [ - d for d in pkg_resources.working_set - if d.key not in {"python", "wsgiref", "argparse"} - ] - +@click.pass_context +def license(ctx, key, db, output, cache, files, proxyprotocol, proxyhost, proxyport): + """ + Find the open source licenses used by your Python dependencies. 
+ """ + LOG.info('Running license command') + packages = get_packages(files, False) + proxy_dictionary = get_proxy_dict(proxyprotocol, proxyhost, proxyport) + announcements = [] + if not db: + announcements = safety.get_announcements(key=key, proxy=proxy_dictionary, telemetry=ctx.parent.telemetry) + + licenses_db = {} + try: - licenses_db = safety.get_licenses(key, db, cache, proxy_dictionary) - except InvalidKeyError as invalid_key_error: - if str(invalid_key_error): - message = str(invalid_key_error) - else: - message = "Your API Key '{key}' is invalid. See {link}".format( - key=key, link='https://goo.gl/O7Y1rS' - ) - click.secho(message, fg="red", file=sys.stderr) - sys.exit(-1) - except DatabaseFileNotFoundError: - click.secho("Unable to load licenses database from {db}".format(db=db), fg="red", file=sys.stderr) - sys.exit(-1) - except TooManyRequestsError: - click.secho("Unable to load licenses database (Too many requests, please wait before another request)", - fg="red", - file=sys.stderr - ) - sys.exit(-1) - except DatabaseFetchError: - click.secho("Unable to load licenses database", fg="red", file=sys.stderr) - sys.exit(-1) - filtered_packages_licenses = get_packages_licenses(packages, licenses_db) - output_report = license_report( - packages=packages, - licenses=filtered_packages_licenses, - json_report=json, - bare_report=bare - ) + licenses_db = safety.get_licenses(key, db, cache, proxy_dictionary, telemetry=ctx.parent.telemetry) + except SafetyError as e: + LOG.exception('Expected SafetyError happened: %s', e) + output_exception(e, exit_code_output=False) + except Exception as e: + LOG.exception('Unexpected Exception happened: %s', e) + exception = e if isinstance(e, SafetyException) else SafetyException(info=e) + output_exception(exception, exit_code_output=False) + + filtered_packages_licenses = get_packages_licenses(packages=packages, licenses_db=licenses_db) + + output_report = SafetyFormatter(output=output).render_licenses(announcements, filtered_packages_licenses) + click.secho(output_report, nl=True) +@cli.command() +@click.option("--path", default=".", help="Path where the generated file will be saved. Default: current directory") +@click.argument('name') +@click.pass_context +def generate(ctx, name, path): + """Create a boilerplate supported file type. + + NAME is the name of the file type to generate. Valid values are: policy_file + """ + if name != 'policy_file': + click.secho(f'This Safety version only supports "policy_file" generation. "{name}" is not supported.', fg='red', + file=sys.stderr) + sys.exit(EXIT_CODE_FAILURE) + + LOG.info('Running generate %s', name) + + if not os.path.exists(path): + click.secho(f'The path "{path}" does not exist.', fg='red', + file=sys.stderr) + sys.exit(EXIT_CODE_FAILURE) + + policy = os.path.join(path, '.safety-policy.yml') + ROOT = os.path.dirname(os.path.abspath(__file__)) + + try: + with open(policy, "w") as f: + f.write(open(os.path.join(ROOT, 'safety-policy-template.yml')).read()) + LOG.debug('Safety created the policy file.') + msg = f'A default Safety policy file has been generated! 
Review the file contents in the path {path} in the ' \ + 'file: .safety-policy.yml' + click.secho(msg, fg='green') + except Exception as exc: + if isinstance(exc, OSError): + LOG.debug('Unable to generate %s because: %s', name, exc.errno) + + click.secho(f'Unable to generate {name}, because: {str(exc)} error.', fg='red', + file=sys.stderr) + sys.exit(EXIT_CODE_FAILURE) + + +@cli.command() +@click.option("--path", default=".safety-policy.yml", help="Path where the generated file will be saved. Default: current directory") +@click.argument('name') +@click.pass_context +def validate(ctx, name, path): + """Verify the validity of a supported file type. + + NAME is the name of the file type to validate. Valid values are: policy_file + """ + if name != 'policy_file': + click.secho(f'This Safety version only supports "policy_file" validation. "{name}" is not supported.', fg='red', + file=sys.stderr) + sys.exit(EXIT_CODE_FAILURE) + + LOG.info('Running validate %s', name) + + if not os.path.exists(path): + click.secho(f'The path "{path}" does not exist.', fg='red', file=sys.stderr) + sys.exit(EXIT_CODE_FAILURE) + + try: + values = SafetyPolicyFile().convert(path, None, None) + except Exception as e: + click.secho(str(e).lstrip(), fg='red', file=sys.stderr) + sys.exit(EXIT_CODE_FAILURE) + + del values['raw'] + + click.secho(f'The Safety policy file was successfully parsed with the following values:', fg='green') + click.secho(json.dumps(values, indent=4, default=str)) + + if __name__ == "__main__": cli() diff --git a/pipenv/patched/safety/constants.py b/pipenv/patched/safety/constants.py index 378b00f38d..85f41c56bf 100644 --- a/pipenv/patched/safety/constants.py +++ b/pipenv/patched/safety/constants.py @@ -2,21 +2,37 @@ import os OPEN_MIRRORS = [ - "https://raw.githubusercontent.com/pyupio/safety-db/master/data/", + "https://pyup.io/aws/safety/free/", ] +API_VERSION = 'v1/' +SAFETY_ENDPOINT = 'safety/' +API_BASE_URL = 'https://pyup.io/api/' + API_VERSION + SAFETY_ENDPOINT + API_MIRRORS = [ - "https://pyup.io/api/v1/safety/" + API_BASE_URL ] REQUEST_TIMEOUT = 5 -CACHE_VALID_SECONDS = 60 * 60 * 2 # 2 hours - -CACHE_LICENSES_VALID_SECONDS = 60 * 60 * 24 * 7 # one week - CACHE_FILE = os.path.join( os.path.expanduser("~"), ".safety", "cache.json" ) + +# Colors +YELLOW = 'yellow' +RED = 'red' +GREEN = 'green' + + +# Exit codes +EXIT_CODE_OK = 0 +EXIT_CODE_FAILURE = 1 +EXIT_CODE_VULNERABILITIES_FOUND = 64 +EXIT_CODE_INVALID_API_KEY = 65 +EXIT_CODE_TOO_MANY_REQUESTS = 66 +EXIT_CODE_UNABLE_TO_LOAD_LOCAL_VULNERABILITY_DB = 67 +EXIT_CODE_UNABLE_TO_FETCH_VULNERABILITY_DB = 68 +EXIT_CODE_MALFORMED_DB = 69 diff --git a/pipenv/patched/safety/errors.py b/pipenv/patched/safety/errors.py index a81e7ad8f2..1deb92bcd5 100644 --- a/pipenv/patched/safety/errors.py +++ b/pipenv/patched/safety/errors.py @@ -1,14 +1,106 @@ -class DatabaseFetchError(Exception): - pass +from pipenv.patched.safety.constants import EXIT_CODE_FAILURE, EXIT_CODE_INVALID_API_KEY, EXIT_CODE_TOO_MANY_REQUESTS, \ + EXIT_CODE_UNABLE_TO_FETCH_VULNERABILITY_DB, EXIT_CODE_UNABLE_TO_LOAD_LOCAL_VULNERABILITY_DB, EXIT_CODE_MALFORMED_DB + + +class SafetyException(Exception): + + def __init__(self, message="Unhandled exception happened: {info}", info=""): + self.message = message.format(info=info) + super().__init__(self.message) + + def get_exit_code(self): + return EXIT_CODE_FAILURE + + +class SafetyError(Exception): + + def __init__(self, message="Unhandled Safety generic error"): + self.message = message + super().__init__(self.message) + + def 
get_exit_code(self): + return EXIT_CODE_FAILURE + + +class MalformedDatabase(SafetyError): + + def __init__(self, reason=None, fetched_from="server", + message="Sorry, something went wrong.\n" + + "Safety CLI can not read the data fetched from {fetched_from} because is malformed.\n"): + info = "Reason, {reason}".format(reason=reason) + self.message = message.format(fetched_from=fetched_from) + (info if reason else "") + super().__init__(self.message) + + def get_exit_code(self): + return EXIT_CODE_MALFORMED_DB + + +class DatabaseFetchError(SafetyError): + + def __init__(self, message="Unable to load vulnerability database"): + self.message = message + super().__init__(self.message) + + def get_exit_code(self): + return EXIT_CODE_UNABLE_TO_FETCH_VULNERABILITY_DB class DatabaseFileNotFoundError(DatabaseFetchError): - pass + + def __init__(self, db=None, message="Unable to find vulnerability database in {db}"): + self.db = db + self.message = message.format(db=db) + super().__init__(self.message) + + def get_exit_code(self): + return EXIT_CODE_UNABLE_TO_LOAD_LOCAL_VULNERABILITY_DB class InvalidKeyError(DatabaseFetchError): - pass + + def __init__(self, key=None, message="Your API Key '{key}' is invalid. See {link}.", reason=None): + self.key = key + self.link = 'https://bit.ly/3OY2wEI' + self.message = message.format(key=key, link=self.link) if key else message + info = f" Reason: {reason}" + self.message = self.message + (info if reason else "") + super().__init__(self.message) + + def get_exit_code(self): + return EXIT_CODE_INVALID_API_KEY class TooManyRequestsError(DatabaseFetchError): - pass + + def __init__(self, reason=None, + message="Too many requests."): + info = f" Reason: {reason}" + self.message = message + (info if reason else "") + super().__init__(self.message) + + def get_exit_code(self): + return EXIT_CODE_TOO_MANY_REQUESTS + + +class NetworkConnectionError(DatabaseFetchError): + + def __init__(self, message="Check your network connection, unable to reach the server."): + self.message = message + super().__init__(self.message) + + +class RequestTimeoutError(DatabaseFetchError): + + def __init__(self, message="Check your network connection, the request timed out."): + self.message = message + super().__init__(self.message) + + +class ServerError(DatabaseFetchError): + + def __init__(self, reason=None, + message="Sorry, something went wrong.\n" + "Safety CLI can not connect to the server.\n" + + "Our engineers are working quickly to resolve the issue."): + info = f" Reason: {reason}" + self.message = message + (info if reason else "") + super().__init__(self.message) diff --git a/pipenv/patched/safety/formatter.py b/pipenv/patched/safety/formatter.py index a8ff3241ac..132bfb9e67 100644 --- a/pipenv/patched/safety/formatter.py +++ b/pipenv/patched/safety/formatter.py @@ -1,342 +1,56 @@ -# -*- coding: utf-8 -*- -import platform -import sys -import json -import os -import textwrap +import logging +from abc import ABCMeta, abstractmethod -from .util import get_packages_licenses +NOT_IMPLEMENTED = "You should implement this." 
-# python 2.7 compat -try: - FileNotFoundError -except NameError: - FileNotFoundError = IOError +LOG = logging.getLogger(__name__) -try: - system = platform.system() - python_version = ".".join([str(i) for i in sys.version_info[0:2]]) - # get_terminal_size exists on Python 3.4 but isn't working on windows - if system == "Windows" and python_version in ["3.4"]: - raise ImportError - from shutil import get_terminal_size -except ImportError: - # fallback for python < 3 - import subprocess - from collections import namedtuple - def get_terminal_size(): - size = namedtuple("_", ["rows", "columns"]) - try: - rows, columns = subprocess.check_output( - ['stty', 'size'], - stderr=subprocess.STDOUT - ).split() - return size(rows=int(rows), columns=int(columns)) - # this won't work - # - on windows (FileNotFoundError/OSError) - # - python 2.6 (AttributeError) - # - if the output is somehow mangled (ValueError) - except (ValueError, FileNotFoundError, OSError, - AttributeError, subprocess.CalledProcessError): - return size(rows=0, columns=0) +class FormatterAPI: + """ + Strategy Abstract class, with all the render methods that the concrete implementations should support + """ + __metaclass__ = ABCMeta -def get_advisory(vuln): - return vuln.advisory if vuln.advisory else "No advisory found for this vulnerability." + @abstractmethod + def render_vulnerabilities(self, announcements, vulnerabilities, remediations, full, packages): + raise NotImplementedError(NOT_IMPLEMENTED) # pragma: no cover + @abstractmethod + def render_licenses(self, announcements, licenses): + raise NotImplementedError(NOT_IMPLEMENTED) # pragma: no cover -class SheetReport(object): - REPORT_BANNER = r""" -+==============================================================================+ -| | -| /$$$$$$ /$$ | -| /$$__ $$ | $$ | -| /$$$$$$$ /$$$$$$ | $$ \__//$$$$$$ /$$$$$$ /$$ /$$ | -| /$$_____/ |____ $$| $$$$ /$$__ $$|_ $$_/ | $$ | $$ | -| | $$$$$$ /$$$$$$$| $$_/ | $$$$$$$$ | $$ | $$ | $$ | -| \____ $$ /$$__ $$| $$ | $$_____/ | $$ /$$| $$ | $$ | -| /$$$$$$$/| $$$$$$$| $$ | $$$$$$$ | $$$$/| $$$$$$$ | -| |_______/ \_______/|__/ \_______/ \___/ \____ $$ | -| /$$ | $$ | -| | $$$$$$/ | -| by pyup.io \______/ | -| | -+==============================================================================+ - """.strip() + @abstractmethod + def render_announcements(self, announcements): + raise NotImplementedError(NOT_IMPLEMENTED) # pragma: no cover - TABLE_HEADING = r""" -+============================+===========+==========================+==========+ -| package | installed | affected | ID | -+============================+===========+==========================+==========+ - """.strip() - TABLE_HEADING_LICENSES = r""" -+=============================================+===========+====================+ -| package | version | license | -+=============================================+===========+====================+ - """.strip() +class SafetyFormatter(FormatterAPI): - REPORT_HEADING = r""" -| REPORT | - """.strip() + def render_vulnerabilities(self, announcements, vulnerabilities, remediations, full, packages): + LOG.info('Safety is going to render_vulnerabilities with format: %s', self.format) + return self.format.render_vulnerabilities(announcements, vulnerabilities, remediations, full, packages) - REPORT_SECTION = r""" -+==============================================================================+ - """.strip() + def render_licenses(self, announcements, licenses): + LOG.info('Safety is going to render_licenses with format: %s', self.format) + return 
self.format.render_licenses(announcements, licenses) - REPORT_FOOTER = r""" -+==============================================================================+ - """.strip() + def render_announcements(self, announcements): + LOG.info('Safety is going to render_announcements with format: %s', self.format) + return self.format.render_announcements(announcements) - @staticmethod - def render(vulns, full, checked_packages, used_db): - db_format_str = '{: <' + str(51 - len(str(checked_packages))) + '}' - status = "| checked {packages} packages, using {db} |".format( - packages=checked_packages, - db=db_format_str.format(used_db), - section=SheetReport.REPORT_SECTION - ) - if vulns: - table = [] - for n, vuln in enumerate(vulns): - table.append("| {:26} | {:9} | {:24} | {:8} |".format( - vuln.name[:26], - vuln.version[:9], - vuln.spec[:24], - vuln.vuln_id - )) - if full: - table.append(SheetReport.REPORT_SECTION) + def __init__(self, output): + from pipenv.patched.safety.formatters.screen import ScreenReport + from pipenv.patched.safety.formatters.text import TextReport + from pipenv.patched.safety.formatters.json import JsonReport + from pipenv.patched.safety.formatters.bare import BareReport - if vuln.cvssv2 is not None: - base_score = vuln.cvssv2.get("base_score", "None") - impact_score = vuln.cvssv2.get("impact_score", "None") + self.format = ScreenReport() - table.append("| {:76} |".format( - "CVSS v2 | BASE SCORE: {} | IMPACT SCORE: {}".format( - base_score, - impact_score, - ) - )) - table.append(SheetReport.REPORT_SECTION) - - if vuln.cvssv3 is not None: - base_score = vuln.cvssv3.get("base_score", "None") - impact_score = vuln.cvssv3.get("impact_score", "None") - base_severity = vuln.cvssv3.get("base_severity", "None") - - table.append("| {:76} |".format( - "CVSS v3 | BASE SCORE: {} | IMPACT SCORE: {} | BASE SEVERITY: {}".format( - base_score, - impact_score, - base_severity, - ) - )) - table.append(SheetReport.REPORT_SECTION) - - advisory_lines = get_advisory(vuln).replace( - '\r', '' - ).splitlines() - - for line in advisory_lines: - if line == '': - table.append("| {:76} |".format(" ")) - for wrapped_line in textwrap.wrap(line, width=76): - try: - table.append("| {:76} |".format( - wrapped_line.encode('utf-8') - )) - except TypeError: - table.append("| {:76} |".format( - wrapped_line - )) - # append the REPORT_SECTION only if this isn't the last entry - if n + 1 < len(vulns): - table.append(SheetReport.REPORT_SECTION) - return "\n".join( - [SheetReport.REPORT_BANNER, SheetReport.REPORT_HEADING, status, SheetReport.TABLE_HEADING, - "\n".join(table), SheetReport.REPORT_FOOTER] - ) - else: - content = "| {:76} |".format("No known security vulnerabilities found.") - return "\n".join( - [SheetReport.REPORT_BANNER, SheetReport.REPORT_HEADING, status, SheetReport.REPORT_SECTION, - content, SheetReport.REPORT_FOOTER] - ) - - @staticmethod - def render_licenses(packages, packages_licenses): - heading = SheetReport.REPORT_HEADING.replace(" ", "", 12).replace( - "REPORT", " Packages licenses" - ) - if not packages_licenses: - content = "| {:76} |".format("No packages licenses found.") - return "\n".join( - [SheetReport.REPORT_BANNER, heading, SheetReport.REPORT_SECTION, - content, SheetReport.REPORT_FOOTER] - ) - - table = [] - iteration = 1 - for pkg_license in packages_licenses: - max_char = last_char = 43 # defines a limit for package name. 
- current_line = 1 - package = pkg_license['package'] - license = pkg_license['license'] - version = pkg_license['version'] - license_line = int(int(len(package) / max_char) / 2) + 1 # Calc to get which line to add the license info. - - table.append("| {:43} | {:9} | {:18} |".format( - package[:max_char], - version[:9] if current_line == license_line else "", - license[:18] if current_line == license_line else "", - )) - - long_name = True if len(package[max_char:]) > 0 else False - while long_name: # If the package has a long name, break it into multiple lines. - current_line += 1 - table.append("| {:43} | {:9} | {:18} |".format( - package[last_char:last_char+max_char], - version[:9] if current_line == license_line else "", - license[:18] if current_line == license_line else "", - )) - last_char = last_char+max_char - long_name = True if len(package[last_char:]) > 0 else False - - if iteration != len(packages_licenses): # Do not add dashes "----" for last package. - table.append("|" + ("-" * 78) + "|") - iteration += 1 - return "\n".join( - [SheetReport.REPORT_BANNER, heading, SheetReport.TABLE_HEADING_LICENSES, - "\n".join(table), SheetReport.REPORT_FOOTER] - ) - -class BasicReport(object): - """Basic report, intented to be used for terminals with < 80 columns""" - - @staticmethod - def render(vulns, full, checked_packages, used_db): - table = [ - "safety report", - "checked {packages} packages, using {db}".format( - packages=checked_packages, - db=used_db - ), - "---" - ] - if vulns: - - for vuln in vulns: - table.append("-> {}, installed {}, affected {}, id {}".format( - vuln.name, - vuln.version[:13], - vuln.spec[:27], - vuln.vuln_id - )) - if full: - if vuln.cvssv2 is not None: - base_score = vuln.cvssv2.get("base_score", "None") - impact_score = vuln.cvssv2.get("impact_score", "None") - - table.append("CVSS v2 -- BASE SCORE: {}, IMPACT SCORE: {}".format( - base_score, - impact_score, - )) - - if vuln.cvssv3 is not None: - base_score = vuln.cvssv3.get("base_score", "None") - impact_score = vuln.cvssv3.get("impact_score", "None") - base_severity = vuln.cvssv3.get("base_severity", "None") - - table.append("CVSS v3 -- BASE SCORE: {}, IMPACT SCORE: {}, BASE SEVERITY: {}".format( - base_score, - impact_score, - base_severity, - )) - - table.append(get_advisory(vuln)) - table.append("--") - else: - table.append("No known security vulnerabilities found.") - return "\n".join( - table - ) - - @staticmethod - def render_licenses(packages, packages_licenses): - table = [ - "safety", - "packages licenses", - "---" - ] - if not packages_licenses: - table.append("No packages licenses found.") - return "\n".join(table) - - for pkg_license in packages_licenses: - text = pkg_license['package'] + \ - ", version " + pkg_license['version'] + \ - ", license " + pkg_license['license'] + "\n" - table.append(text) - - return "\n".join(table) - -class JsonReport(object): - """Json report, for when the output is input for something else""" - - @staticmethod - def render(vulns, full): - return json.dumps(vulns, indent=4, sort_keys=True) - - @staticmethod - def render_licenses(packages_licenses): - return json.dumps(packages_licenses, indent=4, sort_keys=True) - - -class BareReport(object): - """Bare report, for command line tools""" - @staticmethod - def render(vulns, full): - return " ".join(set([v.name for v in vulns])) - - @staticmethod - def render_licenses(packages_licenses): - licenses = set([pkg_li.get('license') for pkg_li in packages_licenses]) - if "N/A" in licenses: - licenses.remove("N/A") - 
sorted_licenses = sorted(licenses) - return " ".join(sorted_licenses) - - -def get_used_db(key, db): - key = key if key else os.environ.get("SAFETY_API_KEY", False) - if db: - return "local DB" - if key: - return "pyup.io's DB" - return "free DB (updated once a month)" - - -def report(vulns, full=False, json_report=False, bare_report=False, checked_packages=0, db=None, key=None): - if bare_report: - return BareReport.render(vulns, full=full) - if json_report: - return JsonReport.render(vulns, full=full) - size = get_terminal_size() - used_db = get_used_db(key=key, db=db) - if size.columns >= 80: - return SheetReport.render(vulns, full=full, checked_packages=checked_packages, used_db=used_db) - return BasicReport.render(vulns, full=full, checked_packages=checked_packages, used_db=used_db) - - -def license_report(packages, licenses, json_report=False, bare_report=False): - if json_report: - return JsonReport.render_licenses(packages_licenses=licenses) - elif bare_report: - return BareReport.render_licenses(packages_licenses=licenses) - - size = get_terminal_size() - if size.columns >= 80: - return SheetReport.render_licenses(packages, licenses) - return BasicReport.render_licenses(packages, licenses) + if output == 'json': + self.format = JsonReport() + elif output == 'bare': + self.format = BareReport() + elif output == 'text': + self.format = TextReport() diff --git a/pipenv/patched/safety/formatters/__init__.py b/pipenv/patched/safety/formatters/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pipenv/patched/safety/formatters/bare.py b/pipenv/patched/safety/formatters/bare.py new file mode 100644 index 0000000000..1e730a5c63 --- /dev/null +++ b/pipenv/patched/safety/formatters/bare.py @@ -0,0 +1,38 @@ +from collections import namedtuple + +from pipenv.patched.safety.formatter import FormatterAPI +from pipenv.patched.safety.util import get_basic_announcements + + +class BareReport(FormatterAPI): + """Bare report, for command line tools""" + + def render_vulnerabilities(self, announcements, vulnerabilities, remediations, full, packages): + parsed_announcements = [] + + Announcement = namedtuple("Announcement", ["name"]) + + for announcement in get_basic_announcements(announcements): + normalized_message = "-".join(announcement.get('message', 'none').lower().split()) + parsed_announcements.append(Announcement(name=normalized_message)) + + announcements_to_render = [announcement.name for announcement in parsed_announcements] + affected_packages = list(set([v.package_name for v in vulnerabilities if not v.ignored])) + + return " ".join(announcements_to_render + affected_packages) + + def render_licenses(self, announcements, packages_licenses): + parsed_announcements = [] + + for announcement in get_basic_announcements(announcements): + normalized_message = "-".join(announcement.get('message', 'none').lower().split()) + parsed_announcements.append({'license': normalized_message}) + + announcements_to_render = [announcement.get('license') for announcement in parsed_announcements] + + licenses = list(set([pkg_li.get('license') for pkg_li in packages_licenses])) + sorted_licenses = sorted(licenses) + return " ".join(announcements_to_render + sorted_licenses) + + def render_announcements(self, announcements): + print('render_announcements bare') diff --git a/pipenv/patched/safety/formatters/json.py b/pipenv/patched/safety/formatters/json.py new file mode 100644 index 0000000000..a584ee3475 --- /dev/null +++ b/pipenv/patched/safety/formatters/json.py @@ -0,0 +1,75 @@ 
+import logging + +import json as json_parser + +from pipenv.patched.safety.formatter import FormatterAPI +from pipenv.patched.safety.output_utils import get_report_brief_info +from pipenv.patched.safety.util import get_basic_announcements + +LOG = logging.getLogger(__name__) + + +class JsonReport(FormatterAPI): + """Json report, for when the output is input for something else""" + + def render_vulnerabilities(self, announcements, vulnerabilities, remediations, full, packages): + remediations_recommended = len(remediations.keys()) + LOG.debug('Rendering %s vulnerabilities, %s remediations with full_report: %s', len(vulnerabilities), + remediations_recommended, full) + vulns_ignored = [vuln.to_dict() for vuln in vulnerabilities if vuln.ignored] + vulns = [vuln.to_dict() for vuln in vulnerabilities if not vuln.ignored] + + report = get_report_brief_info(as_dict=True, report_type=1, vulnerabilities_found=len(vulns), + vulnerabilities_ignored=len(vulns_ignored), + remediations_recommended=remediations_recommended) + + remed = {} + for k, v in remediations.items(): + if k not in remed: + remed[k] = {} + + closest = v.get('closest_secure_version', {}) + upgrade = closest.get('major', None) + downgrade = closest.get('minor', None) + + recommended_version = None + + if upgrade: + recommended_version = str(upgrade) + elif downgrade: + recommended_version = str(downgrade) + + remed[k]['current_version'] = v.get('version', None) + remed[k]['vulnerabilities_found'] = v.get('vulns_found', 0) + remed[k]['recommended_version'] = recommended_version + remed[k]['other_recommended_versions'] = [other_v for other_v in v.get('secure_versions', []) if + other_v != recommended_version] + remed[k]['more_info_url'] = v.get('more_info_url', '') + + template = { + "report_meta": report, + "scanned_packages": {p.name: p.to_dict(short_version=True) for p in packages}, + "affected_packages": {v.pkg.name: v.pkg.to_dict() for v in vulnerabilities}, + "announcements": [{'type': item.get('type'), 'message': item.get('message')} for item in + get_basic_announcements(announcements)], + "vulnerabilities": vulns, + "ignored_vulnerabilities": vulns_ignored, + "remediations": remed + } + + return json_parser.dumps(template, indent=4) + + def render_licenses(self, announcements, licenses): + unique_license_types = set([lic['license'] for lic in licenses]) + report = get_report_brief_info(as_dict=True, report_type=2, licenses_found=len(unique_license_types)) + + template = { + "report_meta": report, + "announcements": get_basic_announcements(announcements), + "licenses": licenses, + } + + return json_parser.dumps(template, indent=4) + + def render_announcements(self, announcements): + return json_parser.dumps({"announcements": get_basic_announcements(announcements)}, indent=4) diff --git a/pipenv/patched/safety/formatters/screen.py b/pipenv/patched/safety/formatters/screen.py new file mode 100644 index 0000000000..d75d1ff2a4 --- /dev/null +++ b/pipenv/patched/safety/formatters/screen.py @@ -0,0 +1,143 @@ +import pipenv.vendor.click as click + +from pipenv.patched.safety.formatter import FormatterAPI +from pipenv.patched.safety.output_utils import build_announcements_section_content, format_long_text, \ + add_empty_line, format_vulnerability, get_final_brief, \ + build_report_brief_section, format_license, get_final_brief_license, build_remediation_section, \ + build_primary_announcement +from pipenv.patched.safety.util import get_primary_announcement, get_basic_announcements, get_terminal_size + + +class 
ScreenReport(FormatterAPI): + DIVIDER_SECTIONS = '+' + '=' * (get_terminal_size().columns - 2) + '+' + + REPORT_BANNER = DIVIDER_SECTIONS + '\n' + r""" + /$$$$$$ /$$ + /$$__ $$ | $$ + /$$$$$$$ /$$$$$$ | $$ \__//$$$$$$ /$$$$$$ /$$ /$$ + /$$_____/ |____ $$| $$$$ /$$__ $$|_ $$_/ | $$ | $$ + | $$$$$$ /$$$$$$$| $$_/ | $$$$$$$$ | $$ | $$ | $$ + \____ $$ /$$__ $$| $$ | $$_____/ | $$ /$$| $$ | $$ + /$$$$$$$/| $$$$$$$| $$ | $$$$$$$ | $$$$/| $$$$$$$ + |_______/ \_______/|__/ \_______/ \___/ \____ $$ + /$$ | $$ + | $$$$$$/ + by pyup.io \______/ + +""" + DIVIDER_SECTIONS + + ANNOUNCEMENTS_HEADING = format_long_text(click.style('ANNOUNCEMENTS', bold=True)) + + def __build_announcements_section(self, announcements): + announcements_section = [] + + basic_announcements = get_basic_announcements(announcements) + + if basic_announcements: + announcements_content = build_announcements_section_content(basic_announcements) + announcements_section = [add_empty_line(), self.ANNOUNCEMENTS_HEADING, add_empty_line(), + announcements_content, add_empty_line(), self.DIVIDER_SECTIONS] + + return announcements_section + + def render_vulnerabilities(self, announcements, vulnerabilities, remediations, full, packages): + announcements_section = self.__build_announcements_section(announcements) + primary_announcement = get_primary_announcement(announcements) + remediation_section = build_remediation_section(remediations) + end_content = [] + + if primary_announcement: + end_content = [add_empty_line(), + build_primary_announcement(primary_announcement, columns=get_terminal_size().columns), + self.DIVIDER_SECTIONS] + + table = [] + ignored = {} + total_ignored = 0 + + for n, vuln in enumerate(vulnerabilities): + if vuln.ignored: + total_ignored += 1 + ignored[vuln.package_name] = ignored.get(vuln.package_name, 0) + 1 + table.append(format_vulnerability(vuln, full)) + + report_brief_section = build_report_brief_section(primary_announcement=primary_announcement, report_type=1, + vulnerabilities_found=max(0, len(vulnerabilities)-total_ignored), + vulnerabilities_ignored=total_ignored, + remediations_recommended=len(remediations)) + + if vulnerabilities: + + final_brief = get_final_brief(len(vulnerabilities), len(remediations), ignored, total_ignored) + + return "\n".join( + [ScreenReport.REPORT_BANNER] + announcements_section + [report_brief_section, + add_empty_line(), + self.DIVIDER_SECTIONS, + format_long_text( + click.style('VULNERABILITIES FOUND', + bold=True, fg='red')), + self.DIVIDER_SECTIONS, + add_empty_line(), + "\n\n".join(table), + final_brief, + add_empty_line(), + self.DIVIDER_SECTIONS] + + remediation_section + end_content + ) + else: + content = format_long_text(click.style("No known security vulnerabilities found.", bold=True, fg='green')) + return "\n".join( + [ScreenReport.REPORT_BANNER] + announcements_section + [report_brief_section, + self.DIVIDER_SECTIONS, + add_empty_line(), + content, + add_empty_line(), + self.DIVIDER_SECTIONS] + + end_content + ) + + def render_licenses(self, announcements, licenses): + unique_license_types = set([lic['license'] for lic in licenses]) + + report_brief_section = build_report_brief_section(primary_announcement=get_primary_announcement(announcements), + report_type=2, licenses_found=len(unique_license_types)) + announcements_section = self.__build_announcements_section(announcements) + + if not licenses: + content = format_long_text(click.style("No packages licenses found.", bold=True, fg='red')) + return "\n".join( + [ScreenReport.REPORT_BANNER] + 
announcements_section + [report_brief_section, + self.DIVIDER_SECTIONS, + add_empty_line(), + content, + add_empty_line(), + self.DIVIDER_SECTIONS] + ) + + table = [] + for license in licenses: + table.append(format_license(license)) + + final_brief = get_final_brief_license(unique_license_types) + + return "\n".join( + [ScreenReport.REPORT_BANNER] + announcements_section + [report_brief_section, + add_empty_line(), + self.DIVIDER_SECTIONS, + format_long_text( + click.style('LICENSES FOUND', + bold=True, fg='yellow')), + self.DIVIDER_SECTIONS, + add_empty_line(), + "\n".join(table), + final_brief, + add_empty_line(), + self.DIVIDER_SECTIONS] + ) + + def render_announcements(self, announcements): + return self.__build_announcements_section(announcements) + + + diff --git a/pipenv/patched/safety/formatters/text.py b/pipenv/patched/safety/formatters/text.py new file mode 100644 index 0000000000..4f40a961b0 --- /dev/null +++ b/pipenv/patched/safety/formatters/text.py @@ -0,0 +1,134 @@ +import pipenv.vendor.click as click + +from pipenv.patched.safety.formatter import FormatterAPI +from pipenv.patched.safety.output_utils import build_announcements_section_content, format_vulnerability, \ + build_report_brief_section, get_final_brief_license, add_empty_line, get_final_brief, build_remediation_section, \ + build_primary_announcement +from pipenv.patched.safety.util import get_primary_announcement, get_basic_announcements + + +class TextReport(FormatterAPI): + """Basic report, intented to be used for terminals with < 80 columns""" + + SMALL_DIVIDER_SECTIONS = '+' + '=' * 78 + '+' + + TEXT_REPORT_BANNER = SMALL_DIVIDER_SECTIONS + '\n' + r""" + /$$$$$$ /$$ + /$$__ $$ | $$ + /$$$$$$$ /$$$$$$ | $$ \__//$$$$$$ /$$$$$$ /$$ /$$ + /$$_____/ |____ $$| $$$$ /$$__ $$|_ $$_/ | $$ | $$ + | $$$$$$ /$$$$$$$| $$_/ | $$$$$$$$ | $$ | $$ | $$ + \____ $$ /$$__ $$| $$ | $$_____/ | $$ /$$| $$ | $$ + /$$$$$$$/| $$$$$$$| $$ | $$$$$$$ | $$$$/| $$$$$$$ + |_______/ \_______/|__/ \_______/ \___/ \____ $$ + /$$ | $$ + | $$$$$$/ + by pyup.io \______/ + +""" + SMALL_DIVIDER_SECTIONS + + def __build_announcements_section(self, announcements): + announcements_table = [] + + basic_announcements = get_basic_announcements(announcements) + + if basic_announcements: + announcements_content = click.unstyle(build_announcements_section_content(basic_announcements, + columns=80, + start_line_decorator=' ' * 2, + end_line_decorator='')) + announcements_table = [add_empty_line(), 'ANNOUNCEMENTS', add_empty_line(), + announcements_content, add_empty_line(), self.SMALL_DIVIDER_SECTIONS] + + return announcements_table + + def render_vulnerabilities(self, announcements, vulnerabilities, remediations, full, packages): + primary_announcement = get_primary_announcement(announcements) + remediation_section = [click.unstyle(rem) for rem in build_remediation_section(remediations, columns=80)] + end_content = [] + + if primary_announcement: + end_content = [add_empty_line(), + build_primary_announcement(primary_announcement, columns=80, only_text=True), + self.SMALL_DIVIDER_SECTIONS] + + announcement_section = self.__build_announcements_section(announcements) + + ignored = {} + total_ignored = 0 + + for n, vuln in enumerate(vulnerabilities): + if vuln.ignored: + total_ignored += 1 + ignored[vuln.package_name] = ignored.get(vuln.package_name, 0) + 1 + + report_brief_section = click.unstyle( + build_report_brief_section(columns=80, primary_announcement=primary_announcement, + vulnerabilities_found=max(0, len(vulnerabilities)-total_ignored), + 
vulnerabilities_ignored=total_ignored, + remediations_recommended=len(remediations))) + + table = [self.TEXT_REPORT_BANNER] + announcement_section + [ + report_brief_section, + '', + self.SMALL_DIVIDER_SECTIONS, + ] + + if vulnerabilities: + table += [" VULNERABILITIES FOUND", self.SMALL_DIVIDER_SECTIONS] + + for vuln in vulnerabilities: + table.append('\n' + format_vulnerability(vuln, full, only_text=True, columns=80)) + + final_brief = click.unstyle(get_final_brief(len(vulnerabilities), len(remediations), ignored, total_ignored, + kwargs={'columns': 80})) + table += [final_brief, add_empty_line(), self.SMALL_DIVIDER_SECTIONS] + remediation_section + end_content + + else: + table += [add_empty_line(), " No known security vulnerabilities found.", add_empty_line(), + self.SMALL_DIVIDER_SECTIONS] + end_content + + return "\n".join( + table + ) + + def render_licenses(self, announcements, licenses): + unique_license_types = set([lic['license'] for lic in licenses]) + + report_brief_section = click.unstyle( + build_report_brief_section(columns=80, primary_announcement=get_primary_announcement(announcements), + licenses_found=len(unique_license_types))) + + packages_licenses = licenses + announcements_table = self.__build_announcements_section(announcements) + + final_brief = click.unstyle( + get_final_brief_license(unique_license_types, kwargs={'columns': 80})) + + table = [self.TEXT_REPORT_BANNER] + announcements_table + [ + report_brief_section, + self.SMALL_DIVIDER_SECTIONS, + " LICENSES", + self.SMALL_DIVIDER_SECTIONS, + add_empty_line(), + ] + + if not packages_licenses: + table.append(" No packages licenses found.") + table += [final_brief, add_empty_line(), self.SMALL_DIVIDER_SECTIONS] + + return "\n".join(table) + + for pkg_license in packages_licenses: + text = " {0}, version {1}, license {2}\n".format(pkg_license['package'], pkg_license['version'], + pkg_license['license']) + table.append(text) + + table += [final_brief, add_empty_line(), self.SMALL_DIVIDER_SECTIONS] + + return "\n".join(table) + + def render_announcements(self, announcements): + rows = self.__build_announcements_section(announcements) + rows.insert(0, self.SMALL_DIVIDER_SECTIONS) + return '\n'.join(rows) diff --git a/pipenv/patched/safety/models.py b/pipenv/patched/safety/models.py new file mode 100644 index 0000000000..2e01778dc1 --- /dev/null +++ b/pipenv/patched/safety/models.py @@ -0,0 +1,110 @@ +from collections import namedtuple +from datetime import datetime +from typing import NamedTuple + + +class DictConverter(object): + + def to_dict(self, **kwargs): + pass + + +announcement_nmt = namedtuple('Announcement', ['type', 'message']) +remediation_nmt = namedtuple('Remediation', ['Package', 'closest_secure_version', 'secure_versions', + 'latest_package_version']) +cve_nmt = namedtuple('Cve', ['name', 'cvssv2', 'cvssv3']) +severity_nmt = namedtuple('Severity', ['source', 'cvssv2', 'cvssv3']) +vulnerability_nmt = namedtuple('Vulnerability', + ['vulnerability_id', 'package_name', 'pkg', 'ignored', 'ignored_reason', 'ignored_expires', + 'vulnerable_spec', 'all_vulnerable_specs', 'analyzed_version', 'advisory', + 'is_transitive', 'published_date', 'fixed_versions', + 'closest_versions_without_known_vulnerabilities', 'resources', 'CVE', 'severity', + 'affected_versions', 'more_info_url']) +package_nmt = namedtuple('Package', ['name', 'version', 'found', 'insecure_versions', 'secure_versions', + 'latest_version_without_known_vulnerabilities', 'latest_version', 'more_info_url']) +package_nmt.__new__.__defaults__ = 
(None,) * len(package_nmt._fields) # Ugly hack for now +RequirementFile = namedtuple('RequirementFile', ['path']) + + +class Package(package_nmt, DictConverter): + + def to_dict(self, **kwargs): + if kwargs.get('short_version', False): + return { + 'name': self.name, + 'version': self.version, + } + + return {'name': self.name, + 'version': self.version, + 'found': self.found, + 'insecure_versions': self.insecure_versions, + 'secure_versions': self.secure_versions, + 'latest_version_without_known_vulnerabilities': self.latest_version_without_known_vulnerabilities, + 'latest_version': self.latest_version, + 'more_info_url': self.more_info_url + } + + +class Announcement(announcement_nmt): + pass + + +class Remediation(remediation_nmt, DictConverter): + + def to_dict(self): + return {'package': self.Package.name, + 'closest_secure_version': self.closest_secure_version, + 'secure_versions': self.secure_versions, + 'latest_package_version': self.latest_package_version + } + + +class CVE(cve_nmt, DictConverter): + + def to_dict(self): + return {'name': self.name, 'cvssv2': self.cvssv2, 'cvssv3': self.cvssv3} + + +class Severity(severity_nmt, DictConverter): + def to_dict(self): + result = {'severity': {'source': self.source}} + + result['severity']['cvssv2'] = self.cvssv2 + result['severity']['cvssv3'] = self.cvssv3 + + return result + + +class Vulnerability(vulnerability_nmt): + + def to_dict(self): + empty_list_if_none = ['fixed_versions', 'closest_versions_without_known_vulnerabilities', 'resources'] + result = { + } + + ignore = ['pkg'] + + for field, value in zip(self._fields, self): + if field in ignore: + continue + + if value is None and field in empty_list_if_none: + value = [] + + if isinstance(value, CVE): + val = None + if value.name.startswith("CVE"): + val = value.name + result[field] = val + elif isinstance(value, DictConverter): + result.update(value.to_dict()) + elif isinstance(value, datetime): + result[field] = str(value) + else: + result[field] = value + + return result + + def get_advisory(self): + return self.advisory.replace('\r', '') if self.advisory else "No advisory found for this vulnerability." 
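+
+
+# Illustrative usage only (a sketch; every name below is defined in this
+# module). Because package_nmt.__new__.__defaults__ backfills all fields,
+# a Package can be built from partial data and the omitted fields are None:
+#
+#     pkg = Package(name='django', version='2.2.0')
+#     pkg.to_dict(short_version=True)
+#     # -> {'name': 'django', 'version': '2.2.0'}
+#
+#     CVE(name='CVE-2021-0000', cvssv2=None, cvssv3=None).to_dict()
+#     # -> {'name': 'CVE-2021-0000', 'cvssv2': None, 'cvssv3': None}
+#     # (the CVE id above is a made-up placeholder)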
diff --git a/pipenv/patched/safety/output_utils.py b/pipenv/patched/safety/output_utils.py new file mode 100644 index 0000000000..34edac8ac5 --- /dev/null +++ b/pipenv/patched/safety/output_utils.py @@ -0,0 +1,683 @@ +import json +import logging +import textwrap +from datetime import datetime + +import pipenv.vendor.click as click + +from pipenv.patched.safety.constants import RED, YELLOW +from pipenv.patched.safety.util import get_safety_version, Package, get_terminal_size, SafetyContext, build_telemetry_data, build_git_data + +LOG = logging.getLogger(__name__) + + +def build_announcements_section_content(announcements, columns=get_terminal_size().columns, + start_line_decorator=' ', end_line_decorator=' '): + section = '' + + for i, announcement in enumerate(announcements): + + color = '' + if announcement.get('type') == 'error': + color = RED + elif announcement.get('type') == 'warning': + color = YELLOW + + item = '{message}'.format( + message=format_long_text('* ' + announcement.get('message'), color, columns, + start_line_decorator, end_line_decorator)) + section += '{item}'.format(item=item) + + if i + 1 < len(announcements): + section += '\n' + + return section + + +def add_empty_line(): + return format_long_text('') + + +def style_lines(lines, columns, pre_processed_text='', start_line=' ' * 4, end_line=' ' * 4): + styled_text = pre_processed_text + + for line in lines: + styled_line = '' + left_padding = ' ' * line.get('left_padding', 0) + + for i, word in enumerate(line.get('words', [])): + if word.get('style', {}): + text = '' + + if i == 0: + text = left_padding # Include the line padding in the word to avoid Github issues + left_padding = '' # Clean left padding to avoid be added two times + + text += word.get('value', '') + + styled_line += click.style(text=text, **word.get('style', {})) + else: + styled_line += word.get('value', '') + + styled_text += format_long_text(styled_line, columns=columns, start_line_decorator=start_line, + end_line_decorator=end_line, + left_padding=left_padding, **line.get('format', {})) + '\n' + + return styled_text + + +def format_vulnerability(vulnerability, full_mode, only_text=False, columns=get_terminal_size().columns): + + common_format = {'left_padding': 3, 'format': {'sub_indent': ' ' * 3, 'max_lines': None}} + + styled_vulnerability = [ + {'words': [{'style': {'bold': True}, 'value': 'Vulnerability ID: '}, {'value': vulnerability.vulnerability_id}]}, + ] + + vulnerability_spec = [ + {'words': [{'style': {'bold': True}, 'value': 'Affected spec: '}, {'value': vulnerability.vulnerable_spec}]}] + + cve = vulnerability.CVE + + cvssv2_line = None + cve_lines = [] + + if cve: + if full_mode and cve.cvssv2: + b = cve.cvssv2.get("base_score", "-") + s = cve.cvssv2.get("impact_score", "-") + v = cve.cvssv2.get("vector_string", "-") + + # Reset sub_indent as the left_margin is going to be applied in this case + cvssv2_line = {'format': {'sub_indent': ''}, 'words': [ + {'value': f'CVSS v2, BASE SCORE {b}, IMPACT SCORE {s}, VECTOR STRING {v}'}, + ]} + + if cve.cvssv3 and "base_severity" in cve.cvssv3.keys(): + cvss_base_severity_style = {'bold': True} + base_severity = cve.cvssv3.get("base_severity", "-") + + if base_severity.upper() in ['HIGH', 'CRITICAL']: + cvss_base_severity_style['fg'] = 'red' + + b = cve.cvssv3.get("base_score", "-") + + if full_mode: + s = cve.cvssv3.get("impact_score", "-") + v = cve.cvssv3.get("vector_string", "-") + + cvssv3_text = f'CVSS v3, BASE SCORE {b}, IMPACT SCORE {s}, VECTOR STRING {v}' + + else: + cvssv3_text = 
f'CVSS v3, BASE SCORE {b} ' + + cve_lines = [ + {'words': [{'style': {'bold': True}, 'value': '{0} is '.format(cve.name)}, + {'style': cvss_base_severity_style, + 'value': f'{base_severity} SEVERITY => '}, + {'value': cvssv3_text}, + ]}, + ] + + if cvssv2_line: + cve_lines.append(cvssv2_line) + + elif cve.name: + cve_lines = [ + {'words': [{'style': {'bold': True}, 'value': cve.name}]} + ] + + advisory_format = {'sub_indent': ' ' * 3, 'max_lines': None} if full_mode else {'sub_indent': ' ' * 3, + 'max_lines': 2} + + basic_vuln_data_lines = [ + {'format': advisory_format, 'words': [ + {'style': {'bold': True}, 'value': 'ADVISORY: '}, + {'value': vulnerability.advisory.replace('\n', '')}]} + ] + + if SafetyContext().key: + fixed_version_line = {'words': [ + {'style': {'bold': True}, 'value': 'Fixed versions: '}, + {'value': ', '.join(vulnerability.fixed_versions) if vulnerability.fixed_versions else 'No known fix'} + ]} + + basic_vuln_data_lines.append(fixed_version_line) + + more_info_line = [{'words': [{'style': {'bold': True}, 'value': 'For more information, please visit '}, + {'value': click.style(vulnerability.more_info_url)}]}] + + vuln_title = f'-> Vulnerability found in {vulnerability.package_name} version {vulnerability.analyzed_version}\n' + + styled_text = click.style(vuln_title, fg='red') + + to_print = styled_vulnerability + + if not vulnerability.ignored: + to_print += vulnerability_spec + basic_vuln_data_lines + cve_lines + else: + generic_reason = 'This vulnerability is being ignored' + if vulnerability.ignored_expires: + generic_reason += f" until {vulnerability.ignored_expires.strftime('%Y-%m-%d %H:%M:%S UTC')}. " \ + f"See your configurations" + + specific_reason = None + if vulnerability.ignored_reason: + specific_reason = [ + {'words': [{'style': {'bold': True}, 'value': 'Reason: '}, {'value': vulnerability.ignored_reason}]}] + + expire_section = [{'words': [ + {'style': {'bold': True, 'fg': 'green'}, 'value': f'{generic_reason}.'}, ]}] + + if specific_reason: + expire_section += specific_reason + + to_print += expire_section + + to_print += more_info_line + + to_print = [{**common_format, **line} for line in to_print] + + content = style_lines(to_print, columns, styled_text, start_line='', end_line='', ) + + return click.unstyle(content) if only_text else content + + +def format_license(license, only_text=False, columns=get_terminal_size().columns): + to_print = [ + {'words': [{'style': {'bold': True}, 'value': license['package']}, + {'value': ' version {0} found using license '.format(license['version'])}, + {'style': {'bold': True}, 'value': license['license']} + ] + }, + ] + + content = style_lines(to_print, columns, '-> ', start_line='', end_line='') + + return click.unstyle(content) if only_text else content + + +def build_remediation_section(remediations, only_text=False, columns=get_terminal_size().columns, kwargs=None): + columns -= 2 + left_padding = ' ' * 3 + + if not kwargs: + # Reset default params in the format_long_text func + kwargs = {'left_padding': '', 'columns': columns, 'start_line_decorator': '', 'end_line_decorator': '', + 'sub_indent': left_padding} + + END_SECTION = '+' + '=' * columns + '+' + + if not remediations: + return [] + + content = '' + total_vulns = 0 + total_packages = len(remediations.keys()) + + for pkg in remediations.keys(): + total_vulns += remediations[pkg]['vulns_found'] + upgrade_to = remediations[pkg]['closest_secure_version']['major'] + downgrade_to = remediations[pkg]['closest_secure_version']['minor'] + fix_version = None 
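+        # closest_secure_version comes from get_closest_ver() in safety.py:
+        # 'major' is the nearest secure version above the installed one and
+        # 'minor' the nearest secure version below it; upgrading is preferred,
+        # and a downgrade is only suggested when no higher fix exists.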
+ + if upgrade_to: + fix_version = str(upgrade_to) + elif downgrade_to: + fix_version = str(downgrade_to) + + new_line = '\n' + + other_options = [str(fix) for fix in remediations[pkg].get('secure_versions', []) if str(fix) != fix_version] + raw_recommendation = f"We recommend upgrading to version {upgrade_to} of {pkg}." + + if other_options: + raw_other_options = ', '.join(other_options) + raw_pre_other_options = 'Other versions without known vulnerabilities are:' + if len(other_options) == 1: + raw_pre_other_options = 'Other version without known vulnerabilities is' + raw_recommendation = f"{raw_recommendation} {raw_pre_other_options} " \ + f"{raw_other_options}" + + remediation_content = [ + f'{left_padding}The closest version with no known vulnerabilities is ' + click.style(upgrade_to, bold=True), + new_line, + click.style(f'{left_padding}{raw_recommendation}', bold=True, fg='green') + ] + + if not fix_version: + remediation_content = [new_line, + click.style(f'{left_padding}There is no known fix for this vulnerability.', bold=True, fg='yellow')] + + text = 'vulnerabilities' if remediations[pkg]['vulns_found'] > 1 else 'vulnerability' + + raw_rem_title = f"-> {pkg} version {remediations[pkg]['version']} was found, " \ + f"which has {remediations[pkg]['vulns_found']} {text}" + + remediation_title = click.style(raw_rem_title, fg=RED, bold=True) + + content += new_line + format_long_text(remediation_title, **kwargs) + new_line + + pre_content = remediation_content + [ + f"{left_padding}For more information, please visit {remediations[pkg]['more_info_url']}", + f'{left_padding}Always check for breaking changes when upgrading packages.', + new_line] + + for i, element in enumerate(pre_content): + content += format_long_text(element, **kwargs) + + if i + 1 < len(pre_content): + content += '\n' + + title = format_long_text(click.style(f'{left_padding}REMEDIATIONS', fg='green', bold=True), **kwargs) + + body = [content] + + if not is_using_api_key(): + vuln_text = 'vulnerabilities were' if total_vulns != 1 else 'vulnerability was' + pkg_text = 'packages' if total_packages > 1 else 'package' + msg = "{0} {1} found in {2} {3}. " \ + "For detailed remediation & fix recommendations, upgrade to a commercial license."\ + .format(total_vulns, vuln_text, total_packages, pkg_text) + content = '\n' + format_long_text(msg, left_padding=' ', columns=columns) + '\n' + body = [content] + + body.append(END_SECTION) + + content = [title] + body + + if only_text: + content = [click.unstyle(item) for item in content] + + return content + + +def get_final_brief(total_vulns_found, total_remediations, ignored, total_ignored, kwargs=None): + if not kwargs: + kwargs = {} + + total_vulns = max(0, total_vulns_found - total_ignored) + + vuln_text = 'vulnerabilities' if total_ignored > 1 else 'vulnerability' + pkg_text = 'packages were' if len(ignored.keys()) > 1 else 'package was' + + policy_file_text = ' using a safety policy file' if is_using_a_safety_policy_file() else '' + + vuln_brief = f" {total_vulns} vulnerabilit{'y was' if total_vulns == 1 else 'ies were'} found." + ignored_text = f' {total_ignored} {vuln_text} from {len(ignored.keys())} {pkg_text} ignored.' if ignored else '' + remediation_text = f" {total_remediations} remediation{' was' if total_remediations == 1 else 's were'} " \ + f"recommended." 
if is_using_api_key() else '' + + raw_brief = f"Scan was completed{policy_file_text}.{vuln_brief}{ignored_text}{remediation_text}" + + return format_long_text(raw_brief, start_line_decorator=' ', **kwargs) + + +def get_final_brief_license(licenses, kwargs=None): + if not kwargs: + kwargs = {} + + licenses_text = ' Scan was completed.' + + if licenses: + licenses_text = 'The following software licenses were present in your system: {0}'.format(', '.join(licenses)) + + return format_long_text("{0}".format(licenses_text), start_line_decorator=' ', **kwargs) + + +def format_long_text(text, color='', columns=get_terminal_size().columns, start_line_decorator=' ', end_line_decorator=' ', left_padding='', max_lines=None, styling=None, indent='', sub_indent=''): + if not styling: + styling = {} + + if color: + styling.update({'fg': color}) + + columns -= len(start_line_decorator) + len(end_line_decorator) + formatted_lines = [] + lines = text.replace('\r', '').splitlines() + + for line in lines: + base_format = "{:" + str(columns) + "}" + if line == '': + empty_line = base_format.format(" ") + formatted_lines.append("{0}{1}{2}".format(start_line_decorator, empty_line, end_line_decorator)) + wrapped_lines = textwrap.wrap(line, width=columns, max_lines=max_lines, initial_indent=indent, subsequent_indent=sub_indent, placeholder='...') + for wrapped_line in wrapped_lines: + try: + new_line = left_padding + wrapped_line.encode('utf-8') + except TypeError: + new_line = left_padding + wrapped_line + + if styling: + new_line = click.style(new_line, **styling) + + formatted_lines.append(f"{start_line_decorator}{new_line}{end_line_decorator}") + + return "\n".join(formatted_lines) + + +def get_printable_list_of_scanned_items(scanning_target): + context = SafetyContext() + + result = [] + scanned_items_data = [] + + if scanning_target == 'environment': + locations = set([pkg.found for pkg in context.packages if isinstance(pkg, Package)]) + + for path in locations: + result.append([{'styled': False, 'value': '-> ' + path}]) + scanned_items_data.append(path) + + if len(locations) <= 0: + msg = 'No locations found in the environment' + result.append([{'styled': False, 'value': msg}]) + scanned_items_data.append(msg) + + elif scanning_target == 'stdin': + scanned_stdin = [pkg.name for pkg in context.packages if isinstance(pkg, Package)] + value = 'No found packages in stdin' + scanned_items_data = [value] + + if len(scanned_stdin) > 0: + value = ', '.join(scanned_stdin) + scanned_items_data = scanned_stdin + + result.append( + [{'styled': False, 'value': value}]) + + elif scanning_target == 'files': + for file in context.params.get('files', []): + result.append([{'styled': False, 'value': f'-> {file.name}'}]) + scanned_items_data.append(file.name) + elif scanning_target == 'file': + file = context.params.get('file', None) + name = file.name if file else '' + result.append([{'styled': False, 'value': f'-> {name}'}]) + scanned_items_data.append(name) + + return result, scanned_items_data + + +REPORT_HEADING = format_long_text(click.style('REPORT', bold=True)) + + +def build_report_brief_section(columns=None, primary_announcement=None, report_type=1, **kwargs): + if not columns: + columns = get_terminal_size().columns + + styled_brief_lines = [] + + if primary_announcement: + styled_brief_lines.append( + build_primary_announcement(columns=columns, primary_announcement=primary_announcement)) + + for line in get_report_brief_info(report_type=report_type, **kwargs): + ln = '' + padding = ' ' * 2 + + for i, words in 
enumerate(line): + processed_words = words.get('value', '') + if words.get('style', False): + text = '' + if i == 0: + text = padding + padding = '' + text += processed_words + + processed_words = click.style(text, bold=True) + + ln += processed_words + + styled_brief_lines.append(format_long_text(ln, color='', columns=columns, start_line_decorator='', + left_padding=padding, end_line_decorator='', sub_indent=' ' * 2)) + + return "\n".join([add_empty_line(), REPORT_HEADING, add_empty_line(), '\n'.join(styled_brief_lines)]) + + +def build_report_for_review_vuln_report(as_dict=False): + ctx = SafetyContext() + report_from_file = ctx.review + packages = ctx.packages + + if as_dict: + return report_from_file + + policy_f_name = report_from_file.get('policy_file', None) + safety_policy_used = [] + if policy_f_name: + safety_policy_used = [ + {'style': False, 'value': '\nScanning using a security policy file'}, + {'style': True, 'value': ' {0}'.format(policy_f_name)}, + ] + + action_executed = [ + {'style': True, 'value': 'Scanning dependencies'}, + {'style': False, 'value': ' in your '}, + {'style': True, 'value': report_from_file.get('scan_target', '-') + ':'}, + ] + + scanned_items = [] + + for name in report_from_file.get('scanned', []): + scanned_items.append([{'styled': False, 'value': '-> ' + name}]) + + nl = [{'style': False, 'value': ''}] + using_sentence = build_using_sentence(report_from_file.get('api_key', None), + report_from_file.get('local_database_path_used', None)) + scanned_count_sentence = build_scanned_count_sentence(packages) + old_timestamp = report_from_file.get('timestamp', None) + + old_timestamp = [{'style': False, 'value': 'Report generated '}, {'style': True, 'value': old_timestamp}] + now = str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) + current_timestamp = [{'style': False, 'value': 'Timestamp '}, {'style': True, 'value': now}] + + brief_info = [[{'style': False, 'value': 'Safety '}, + {'style': True, 'value': 'v' + report_from_file.get('safety_version', '-')}, + {'style': False, 'value': ' is scanning for '}, + {'style': True, 'value': 'Vulnerabilities'}, + {'style': True, 'value': '...'}] + safety_policy_used, action_executed + ] + [nl] + scanned_items + [nl] + [using_sentence] + [scanned_count_sentence] + [old_timestamp] + \ + [current_timestamp] + + return brief_info + + +def build_using_sentence(key, db): + key_sentence = [] + + if key: + key_sentence = [{'style': True, 'value': 'an API KEY'}, + {'style': False, 'value': ' and the '}] + db_name = 'PyUp Commercial' + else: + db_name = 'non-commercial' + + if db: + db_name = "local file {0}".format(db) + + database_sentence = [{'style': True, 'value': db_name + ' database'}] + + return [{'style': False, 'value': 'Using '}] + key_sentence + database_sentence + + +def build_scanned_count_sentence(packages): + scanned_count = 'No packages found' + if len(packages) >= 1: + scanned_count = 'Found and scanned {0} {1}'.format(len(packages), + 'packages' if len(packages) > 1 else 'package') + + return [{'style': True, 'value': scanned_count}] + + +def add_warnings_if_needed(brief_info): + ctx = SafetyContext() + warnings = [] + + if ctx.packages: + if ctx.params.get('continue_on_error', False): + warnings += [[{'style': True, + 'value': '* Continue-on-error is enabled, so returning successful (0) exit code in all cases.'}]] + + if ctx.params.get('ignore_severity_rules', False) and not is_using_api_key(): + warnings += [[{'style': True, + 'value': '* Could not filter by severity, please upgrade your account to include 
severity data.'}]] + + if warnings: + brief_info += [[{'style': False, 'value': ''}]] + warnings + + +def get_report_brief_info(as_dict=False, report_type=1, **kwargs): + LOG.info('get_report_brief_info: %s, %s, %s', as_dict, report_type, kwargs) + + context = SafetyContext() + + packages = [pkg for pkg in context.packages if isinstance(pkg, Package)] + brief_data = {} + command = context.command + + if command == 'review': + review = build_report_for_review_vuln_report(as_dict) + return review + + key = context.key + db = context.db_mirror + + scanning_types = {'check': {'name': 'Vulnerabilities', 'action': 'Scanning dependencies', 'scanning_target': 'environment'}, # Files, Env or Stdin + 'license': {'name': 'Licenses', 'action': 'Scanning licenses', 'scanning_target': 'environment'}, # Files or Env + 'review': {'name': 'Report', 'action': 'Reading the report', + 'scanning_target': 'file'}} # From file + + targets = ['stdin', 'environment', 'files', 'file'] + for target in targets: + if context.params.get(target, False): + scanning_types[command]['scanning_target'] = target + break + + scanning_target = scanning_types.get(context.command, {}).get('scanning_target', '') + brief_data['scan_target'] = scanning_target + scanned_items, data = get_printable_list_of_scanned_items(scanning_target) + brief_data['scanned'] = data + nl = [{'style': False, 'value': ''}] + + action_executed = [ + {'style': True, 'value': scanning_types.get(context.command, {}).get('action', '')}, + {'style': False, 'value': ' in your '}, + {'style': True, 'value': scanning_target + ':'}, + ] + + policy_file = context.params.get('policy_file', None) + safety_policy_used = [] + + brief_data['policy_file'] = policy_file.get('filename', '-') if policy_file else None + brief_data['policy_file_source'] = 'server' if brief_data['policy_file'] and 'server-safety-policy' in brief_data['policy_file'] else 'local' + + if policy_file and policy_file.get('filename', False): + safety_policy_used = [ + {'style': False, 'value': '\nScanning using a security policy file'}, + {'style': True, 'value': ' {0}'.format(policy_file.get('filename', '-'))}, + ] + + audit_and_monitor = [] + if context.params.get('audit_and_monitor'): + logged_url = context.params.get('audit_and_monitor_url') if context.params.get('audit_and_monitor_url') else "https://pyup.io" + audit_and_monitor = [ + {'style': False, 'value': '\nLogging scan results to'}, + {'style': True, 'value': ' {0}'.format(logged_url)}, + ] + + current_time = str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) + + brief_data['api_key'] = bool(key) + brief_data['local_database_path'] = db if db else None + brief_data['safety_version'] = get_safety_version() + brief_data['timestamp'] = current_time + brief_data['packages_found'] = len(packages) + # Vuln report + additional_data = [] + if report_type == 1: + brief_data['vulnerabilities_found'] = kwargs.get('vulnerabilities_found', 0) + brief_data['vulnerabilities_ignored'] = kwargs.get('vulnerabilities_ignored', 0) + brief_data['remediations_recommended'] = 0 + + additional_data = [ + [{'style': True, 'value': str(brief_data['vulnerabilities_found'])}, + {'style': True, 'value': f' vulnerabilit{"y" if brief_data["vulnerabilities_found"] == 1 else "ies"} found'}], + [{'style': True, 'value': str(brief_data['vulnerabilities_ignored'])}, + {'style': True, 'value': f' vulnerabilit{"y" if brief_data["vulnerabilities_ignored"] == 1 else "ies"} ignored'}], + ] + + if is_using_api_key(): + brief_data['remediations_recommended'] = 
kwargs.get('remediations_recommended', 0) + additional_data.extend( + [[{'style': True, 'value': str(brief_data['remediations_recommended'])}, + {'style': True, 'value': + f' remediation{"" if brief_data["remediations_recommended"] == 1 else "s"} recommended'}]]) + + elif report_type == 2: + brief_data['licenses_found'] = kwargs.get('licenses_found', 0) + additional_data = [ + [{'style': True, 'value': str(brief_data['licenses_found'])}, + {'style': True, 'value': f' license {"type" if brief_data["licenses_found"] == 1 else "types"} found'}], + ] + + brief_data['telemetry'] = build_telemetry_data() + + brief_data['git'] = build_git_data() + brief_data['project'] = context.params.get('project', None) + + brief_data['json_version'] = 1 + + using_sentence = build_using_sentence(key, db) + scanned_count_sentence = build_scanned_count_sentence(packages) + + timestamp = [{'style': False, 'value': 'Timestamp '}, {'style': True, 'value': current_time}] + + brief_info = [[{'style': False, 'value': 'Safety '}, + {'style': True, 'value': 'v' + get_safety_version()}, + {'style': False, 'value': ' is scanning for '}, + {'style': True, 'value': scanning_types.get(context.command, {}).get('name', '')}, + {'style': True, 'value': '...'}] + safety_policy_used + audit_and_monitor, action_executed + ] + [nl] + scanned_items + [nl] + [using_sentence] + [scanned_count_sentence] + [timestamp] + + brief_info.extend(additional_data) + + add_warnings_if_needed(brief_info) + + LOG.info('Brief info data: %s', brief_data) + LOG.info('Brief info, styled output: %s', '\n\n LINE ---->\n ' + '\n\n LINE ---->\n '.join(map(str, brief_info))) + + return brief_data if as_dict else brief_info + + +def build_primary_announcement(primary_announcement, columns=None, only_text=False): + lines = json.loads(primary_announcement.get('message')) + + for line in lines: + if 'words' not in line: + raise ValueError('Missing words keyword') + if len(line['words']) <= 0: + raise ValueError('No words in this line') + for word in line['words']: + if 'value' not in word or not word['value']: + raise ValueError('Empty word or without value') + + message = style_lines(lines, columns, start_line='', end_line='') + + return click.unstyle(message) if only_text else message + + +def is_using_api_key(): + return bool(SafetyContext().key) + + +def is_using_a_safety_policy_file(): + return bool(SafetyContext().params.get('policy_file', None)) + + +def should_add_nl(output, found_vulns): + if output == 'bare' and not found_vulns: + return False + + return True + diff --git a/pipenv/patched/safety/safety-policy-template.yml b/pipenv/patched/safety/safety-policy-template.yml new file mode 100644 index 0000000000..43fa60c1f4 --- /dev/null +++ b/pipenv/patched/safety/safety-policy-template.yml @@ -0,0 +1,14 @@ +# Safety Security and License Configuration file +# We recommend checking this file into your source control in the root of your Python project +# If this file is named .safety-policy.yml and is in the same directory where you run `safety check` it will be used by default. +# Otherwise, you can use the flag `safety check --policy-file ` to specify a custom location and name for the file. +# To validate and review your policy file, run the validate command: `safety validate policy_file --path ` +security: # configuration for the `safety check` command + ignore-cvss-severity-below: 0 # A severity number between 0 and 10. Some helpful reference points: 9=ignore all vulnerabilities except CRITICAL severity. 
7=ignore all vulnerabilities except CRITICAL & HIGH severity. 4=ignore all vulnerabilities except CRITICAL, HIGH & MEDIUM severity. + ignore-cvss-unknown-severity: False # True or False. We recommend you set this to False. + ignore-vulnerabilities: # Here you can list multiple specific vulnerabilities you want to ignore (optionally for a time period) + # We recommend making use of the optional `reason` and `expires` keys for each vulnerability that you ignore. + 25853: # Example vulnerability ID + reason: we don't use the vulnerable function # optional, for internal note purposes to communicate with your team. This reason will be reported in the Safety reports + expires: '2022-10-21' # datetime string - date this ignore will expire, best practice to use this variable + continue-on-vulnerability-error: False # Suppress non-zero exit codes when vulnerabilities are found. Enable this in pipelines and CI/CD processes if you want to pass builds that have vulnerabilities. We recommend you set this to False. diff --git a/pipenv/patched/safety/safety.py b/pipenv/patched/safety/safety.py index f03ba9a0df..bef32c9b5e 100644 --- a/pipenv/patched/safety/safety.py +++ b/pipenv/patched/safety/safety.py @@ -1,42 +1,57 @@ # -*- coding: utf-8 -*- import errno +import itertools import json +import logging import os +import sys import time -from collections import namedtuple +from datetime import datetime import pipenv.patched.pip._vendor.requests as requests from pipenv.patched.pip._vendor.packaging.specifiers import SpecifierSet +from pipenv.patched.pip._vendor.packaging.utils import canonicalize_name +from pipenv.patched.pip._vendor.packaging.version import parse as parse_version, Version, LegacyVersion, parse -from .constants import (API_MIRRORS, CACHE_FILE, CACHE_LICENSES_VALID_SECONDS, - CACHE_VALID_SECONDS, OPEN_MIRRORS, REQUEST_TIMEOUT) +from .constants import (API_MIRRORS, CACHE_FILE, OPEN_MIRRORS, REQUEST_TIMEOUT, API_BASE_URL) from .errors import (DatabaseFetchError, DatabaseFileNotFoundError, - InvalidKeyError, TooManyRequestsError) -from .util import RequirementFile + InvalidKeyError, TooManyRequestsError, NetworkConnectionError, + RequestTimeoutError, ServerError, MalformedDatabase) +from .models import Vulnerability, CVE, Severity +from .util import RequirementFile, read_requirements, Package, build_telemetry_data, sync_safety_context, SafetyContext, \ + validate_expiration_date +session = requests.session() -class Vulnerability(namedtuple("Vulnerability", - ["name", "spec", "version", "advisory", "vuln_id", "cvssv2", "cvssv3"])): - pass +LOG = logging.getLogger(__name__) -def get_from_cache(db_name): +def get_from_cache(db_name, cache_valid_seconds=0): + LOG.debug('Trying to get from cache...') if os.path.exists(CACHE_FILE): + LOG.info('Cache file path: %s', CACHE_FILE) with open(CACHE_FILE) as f: try: data = json.loads(f.read()) + LOG.debug('Trying to get the %s from the cache file', db_name) + LOG.debug('Databases in CACHE file: %s', ', '.join(data)) if db_name in data: - if "cached_at" in data[db_name]: - if 'licenses.json' in db_name: - # Getting the specific cache time for the licenses db. 
- cache_valid_seconds = CACHE_LICENSES_VALID_SECONDS - else: - cache_valid_seconds = CACHE_VALID_SECONDS + LOG.debug('db_name %s', db_name) + if "cached_at" in data[db_name]: if data[db_name]["cached_at"] + cache_valid_seconds > time.time(): + LOG.debug('Getting the database from cache at %s, cache setting: %s', + data[db_name]["cached_at"], cache_valid_seconds) return data[db_name]["db"] + + LOG.debug('Cached file is too old, it was cached at %s', data[db_name]["cached_at"]) + else: + LOG.debug('There is not the cached_at key in %s database', data[db_name]) + except json.JSONDecodeError: - pass + LOG.debug('JSONDecodeError trying to get the cached database.') + else: + LOG.debug("Cache file doesn't exist...") return False @@ -58,7 +73,9 @@ def write_to_cache(db_name, data): os.makedirs(os.path.dirname(CACHE_FILE)) with open(CACHE_FILE, "w") as _: _.write(json.dumps({})) + LOG.debug('Cache file created') except OSError as exc: # Guard against race condition + LOG.debug('Unable to create the cache file because: %s', exc.errno) if exc.errno != errno.EEXIST: raise @@ -66,6 +83,7 @@ def write_to_cache(db_name, data): try: cache = json.loads(f.read()) except json.JSONDecodeError: + LOG.debug('JSONDecodeError in the local cache, dumping the full cache file.') cache = {} with open(CACHE_FILE, "w") as f: @@ -74,41 +92,122 @@ def write_to_cache(db_name, data): "db": data } f.write(json.dumps(cache)) + LOG.debug('Safety updated the cache file for %s database.', db_name) -def fetch_database_url(mirror, db_name, key, cached, proxy): - +def fetch_database_url(mirror, db_name, key, cached, proxy, telemetry=True): headers = {} if key: headers["X-Api-Key"] = key + if not proxy: + proxy = {} + if cached: - cached_data = get_from_cache(db_name=db_name) + cached_data = get_from_cache(db_name=db_name, cache_valid_seconds=cached) if cached_data: + LOG.info('Database %s returned from cache.', db_name) return cached_data url = mirror + db_name - r = requests.get(url=url, timeout=REQUEST_TIMEOUT, headers=headers, proxies=proxy) - if r.status_code == 200: + + telemetry_data = {'telemetry': json.dumps(build_telemetry_data(telemetry=telemetry))} + + try: + r = session.get(url=url, timeout=REQUEST_TIMEOUT, headers=headers, proxies=proxy, params=telemetry_data) + except requests.exceptions.ConnectionError: + raise NetworkConnectionError() + except requests.exceptions.Timeout: + raise RequestTimeoutError() + except requests.exceptions.RequestException: + raise DatabaseFetchError() + + if r.status_code == 403: + raise InvalidKeyError(key=key, reason=r.text) + + if r.status_code == 429: + raise TooManyRequestsError(reason=r.text) + + if r.status_code != 200: + raise ServerError(reason=r.reason) + + try: data = r.json() - if cached: - write_to_cache(db_name, data) - return data - elif r.status_code == 403: - raise InvalidKeyError() - elif r.status_code == 429: - raise TooManyRequestsError() + except json.JSONDecodeError as e: + raise MalformedDatabase(reason=e) + + if cached: + LOG.info('Writing %s to cache because cached value was %s', db_name, cached) + write_to_cache(db_name, data) + + return data + + +def fetch_policy(key, proxy): + url = f"{API_BASE_URL}policy/" + headers = {"X-Api-Key": key} + + if not proxy: + proxy = {} + + try: + LOG.debug(f'Getting policy') + r = session.get(url=url, timeout=REQUEST_TIMEOUT, headers=headers, proxies=proxy) + LOG.debug(r.text) + return r.json() + except: + import pipenv.vendor.click as click + + LOG.exception("Error fetching policy") + click.secho( + "Warning: couldn't fetch 
policy from pyup.io.", + fg="yellow", + file=sys.stderr + ) + + return {"safety_policy": "", "audit_and_monitor": False} + + +def post_results(key, proxy, safety_json, policy_file): + url = f"{API_BASE_URL}result/" + headers = {"X-Api-Key": key} + + if not proxy: + proxy = {} + + # safety_json is in text form already. policy_file is a text YAML + audit_report = { + "safety_json": json.loads(safety_json), + "policy_file": policy_file + } + + try: + LOG.debug(f'Posting results: {audit_report}') + r = session.post(url=url, timeout=REQUEST_TIMEOUT, headers=headers, proxies=proxy, json=audit_report) + LOG.debug(r.text) + + return r.json() + except: + import pipenv.vendor.click as click + + LOG.exception("Error posting results") + click.secho( + "Warning: couldn't upload results to pyup.io.", + fg="yellow", + file=sys.stderr + ) + + return {} def fetch_database_file(path, db_name): full_path = os.path.join(path, db_name) if not os.path.exists(full_path): - raise DatabaseFileNotFoundError() + raise DatabaseFileNotFoundError(db=path) with open(full_path) as f: return json.loads(f.read()) -def fetch_database(full=False, key=False, db=False, cached=False, proxy={}): - +def fetch_database(full=False, key=False, db=False, cached=0, proxy=None, telemetry=True): if db: mirrors = [db] else: @@ -118,7 +217,7 @@ def fetch_database(full=False, key=False, db=False, cached=False, proxy={}): for mirror in mirrors: # mirror can either be a local path or a URL if mirror.startswith("http://") or mirror.startswith("https://"): - data = fetch_database_url(mirror, db_name=db_name, key=key, cached=cached, proxy=proxy) + data = fetch_database_url(mirror, db_name=db_name, key=key, cached=cached, proxy=proxy, telemetry=telemetry) else: data = fetch_database_file(mirror, db_name=db_name) if data: @@ -133,12 +232,90 @@ def get_vulnerabilities(pkg, spec, db): yield entry -def check(packages, key, db_mirror, cached, ignore_ids, proxy): - key = key if key else os.environ.get("SAFETY_API_KEY", False) - db = fetch_database(key=key, db=db_mirror, cached=cached, proxy=proxy) +def get_vulnerability_from(vuln_id, cve, data, specifier, db, name, pkg, ignore_vulns): + base_domain = db.get('$meta', {}).get('base_domain') + pkg_meta = db.get('$meta', {}).get('packages', {}).get(name, {}) + insecure_versions = pkg_meta.get("insecure_versions", []) + secure_versions = pkg_meta.get("secure_versions", []) + latest_version_without_known_vulnerabilities = pkg_meta.get("latest_secure_version", None) + latest_version = pkg_meta.get("latest_version", None) + pkg_refreshed = pkg._replace(insecure_versions=insecure_versions, secure_versions=secure_versions, + latest_version_without_known_vulnerabilities=latest_version_without_known_vulnerabilities, + latest_version=latest_version, + more_info_url=f"{base_domain}{pkg_meta.get('more_info_path', '')}") + + ignored = (ignore_vulns and vuln_id in ignore_vulns and ( + not ignore_vulns[vuln_id]['expires'] or ignore_vulns[vuln_id]['expires'] > datetime.utcnow())) + more_info_url = f"{base_domain}{data.get('more_info_path', '')}" + severity = None + + if cve and cve.cvssv2 or cve.cvssv3: + severity = Severity(source=cve.name, cvssv2=cve.cvssv2, cvssv3=cve.cvssv3) + + return Vulnerability( + vulnerability_id=vuln_id, + package_name=name, + pkg=pkg_refreshed, + ignored=ignored, + ignored_reason=ignore_vulns.get(vuln_id, {}).get('reason', None) if ignore_vulns else None, + ignored_expires=ignore_vulns.get(vuln_id, {}).get('expires', None) if ignore_vulns else None, + vulnerable_spec=specifier, + 
all_vulnerable_specs=data.get("specs", []), + analyzed_version=pkg_refreshed.version, + advisory=data.get("advisory"), + is_transitive=data.get("transitive", False), + published_date=data.get("published_date"), + fixed_versions=[ver for ver in data.get("fixed_versions", []) if ver], + closest_versions_without_known_vulnerabilities=data.get("closest_secure_versions", []), + resources=data.get("vulnerability_resources"), + CVE=cve, + severity=severity, + affected_versions=data.get("affected_versions", []), + more_info_url=more_info_url + ) + + +def get_cve_from(data, db_full): + cve_id = data.get("cve", '').split(",")[0].strip() + cve_meta = db_full.get("$meta", {}).get("cve", {}).get(cve_id, {}) + return CVE(name=cve_id, cvssv2=cve_meta.get("cvssv2", None), cvssv3=cve_meta.get("cvssv3", None)) + + +def ignore_vuln_if_needed(vuln_id, cve, ignore_vulns, ignore_severity_rules): + + if not ignore_severity_rules or not isinstance(ignore_vulns, dict): + return + + severity = None + + if cve.cvssv2 and cve.cvssv2.get("base_score", None): + severity = cve.cvssv2.get("base_score", None) + + if cve.cvssv3 and cve.cvssv3.get("base_score", None): + severity = cve.cvssv3.get("base_score", None) + + ignore_severity_below = float(ignore_severity_rules.get('ignore-cvss-severity-below', 0.0)) + ignore_unknown_severity = bool(ignore_severity_rules.get('ignore-cvss-unknown-severity', False)) + + if severity: + if float(severity) < ignore_severity_below: + reason = 'Ignored by severity rule in policy file, {0} < {1}'.format(float(severity), + ignore_severity_below) + ignore_vulns[vuln_id] = {'reason': reason, 'expires': None} + elif ignore_unknown_severity: + reason = 'Unknown CVSS severity, ignored by severity rule in policy file.' + ignore_vulns[vuln_id] = {'reason': reason, 'expires': None} + + +@sync_safety_context +def check(packages, key=False, db_mirror=False, cached=0, ignore_vulns=None, ignore_severity_rules=None, proxy=None, + include_ignored=False, is_env_scan=True, telemetry=True, params=None, project=None): + SafetyContext().command = 'check' + db = fetch_database(key=key, db=db_mirror, cached=cached, proxy=proxy, telemetry=telemetry) db_full = None vulnerable_packages = frozenset(db.keys()) - vulnerable = [] + vulnerabilities = [] + for pkg in packages: # Ignore recursive files not resolved if isinstance(pkg, RequirementFile): @@ -146,7 +323,7 @@ def check(packages, key, db_mirror, cached, ignore_ids, proxy): # normalize the package name, the safety-db is converting underscores to dashes and uses # lowercase - name = pkg.key.replace("_", "-").lower() + name = canonicalize_name(pkg.name) if name in vulnerable_packages: # we have a candidate here, build the spec set @@ -154,51 +331,175 @@ def check(packages, key, db_mirror, cached, ignore_ids, proxy): spec_set = SpecifierSet(specifiers=specifier) if spec_set.contains(pkg.version): if not db_full: - db_full = fetch_database(full=True, key=key, db=db_mirror, cached=cached, proxy=proxy) + db_full = fetch_database(full=True, key=key, db=db_mirror, cached=cached, proxy=proxy, + telemetry=telemetry) for data in get_vulnerabilities(pkg=name, spec=specifier, db=db_full): vuln_id = data.get("id").replace("pyup.io-", "") - cve_id = data.get("cve") - if cve_id: - cve_id = cve_id.split(",")[0].strip() - if vuln_id and vuln_id not in ignore_ids: - cve_meta = db_full.get("$meta", {}).get("cve", {}).get(cve_id, {}) - vulnerable.append( - Vulnerability( - name=name, - spec=specifier, - version=pkg.version, - advisory=data.get("advisory"), - vuln_id=vuln_id, - 
cvssv2=cve_meta.get("cvssv2", None),
-                                    cvssv3=cve_meta.get("cvssv3", None),
-                                )
-                            )
-    return vulnerable
-
-
-def review(vulnerabilities):
+                        cve = get_cve_from(data, db_full)
+
+                        ignore_vuln_if_needed(vuln_id, cve, ignore_vulns, ignore_severity_rules)
+
+                        vulnerability = get_vulnerability_from(vuln_id, cve, data, specifier, db, name, pkg,
+                                                               ignore_vulns)
+
+                        should_add_vuln = not (vulnerability.is_transitive and is_env_scan)
+
+                        if (include_ignored or vulnerability.vulnerability_id not in ignore_vulns) and should_add_vuln:
+                            vulnerabilities.append(vulnerability)
+
+    return vulnerabilities, db_full
+
+
+def precompute_remediations(remediations, package_metadata, vulns,
+                            ignored_vulns):
+    for vuln in vulns:
+        if vuln.ignored:
+            ignored_vulns.add(vuln.vulnerability_id)
+            continue
+
+        if vuln.package_name in remediations.keys():
+            remediations[vuln.package_name]['vulns_found'] = remediations[vuln.package_name].get('vulns_found', 0) + 1
+        else:
+            vulns_count = 1
+            package_metadata[vuln.package_name] = {'insecure_versions': vuln.pkg.insecure_versions,
+                                                   'secure_versions': vuln.pkg.secure_versions,
+                                                   'version': vuln.pkg.version}
+            remediations[vuln.package_name] = {'vulns_found': vulns_count, 'version': vuln.pkg.version,
+                                               'more_info_url': vuln.pkg.more_info_url}
+
+
+def get_closest_ver(versions, version):
+    results = {'minor': None, 'major': None}
+    if not version or not versions:
+        return results
+
+    sorted_versions = sorted(versions, key=lambda ver: parse_version(ver), reverse=True)
+
+    for v in sorted_versions:
+        index = parse_version(v)
+        current_v = parse_version(version)
+
+        if index > current_v:
+            results['major'] = index
+
+        if index < current_v:
+            results['minor'] = index
+            break
+
+    return results
+
+
+def compute_sec_ver_for_user(package, ignored_vulns, db_full):
+    pkg_meta = db_full.get('$meta', {}).get('packages', {}).get(package, {})
+    versions = set(pkg_meta.get("insecure_versions", []) + pkg_meta.get("secure_versions", []))
+    affected_versions = []
+
+    for vuln in db_full.get(package, []):
+        vuln_id = vuln.get('id', None)
+        if vuln_id and vuln_id not in ignored_vulns:
+            affected_versions += vuln.get('affected_versions', [])
+
+    affected_v = set(affected_versions)
+    sec_ver_for_user = list(versions.difference(affected_v))
+
+    return sorted(sec_ver_for_user, key=lambda ver: parse_version(ver), reverse=True)
+
+
+def compute_sec_ver(remediations, package_metadata, ignored_vulns, db_full):
+    """
+    Compute the secure_versions and the closest_secure_version for each remediation, using the affected_versions
+    of every non-ignored vulnerability of the same package; there is only one remediation per package.
+ """ + for pkg_name in remediations.keys(): + pkg = package_metadata.get(pkg_name, {}) + + if not ignored_vulns: + secure_v = pkg.get('secure_versions', []) + else: + secure_v = compute_sec_ver_for_user(package=pkg_name, ignored_vulns=ignored_vulns, db_full=db_full) + + remediations[pkg_name]['secure_versions'] = secure_v + remediations[pkg_name]['closest_secure_version'] = get_closest_ver(secure_v, + pkg.get('version', None)) + + +def calculate_remediations(vulns, db_full): + remediations = {} + package_metadata = {} + ignored_vulns = set() + + if not db_full: + return remediations + + precompute_remediations(remediations, package_metadata, vulns, ignored_vulns) + compute_sec_ver(remediations, package_metadata, ignored_vulns, db_full) + + return remediations + + +@sync_safety_context +def review(report=None, params=None): + SafetyContext().command = 'review' vulnerable = [] + vulnerabilities = report.get('vulnerabilities', []) + report.get('ignored_vulnerabilities', []) + remediations = {} + + for key, value in report.get('remediations', {}).items(): + recommended = value.get('recommended_version', None) + secure_v = value.get('other_recommended_versions', []) + major = None + if recommended: + secure_v.append(recommended) + major = parse(recommended) + + remediations[key] = {'vulns_found': value.get('vulnerabilities_found', 0), + 'version': value.get('current_version'), + 'secure_versions': secure_v, + 'closest_secure_version': {'major': major, 'minor': None}, + # minor isn't supported in review + 'more_info_url': value.get('more_info_url')} + + packages = report.get('scanned_packages', []) + pkgs = {pkg_name: Package(**pkg_values) for pkg_name, pkg_values in packages.items()} + ctx = SafetyContext() + found_packages = list(pkgs.values()) + ctx.packages = found_packages + ctx.review = report.get('report_meta', []) + ctx.key = ctx.review.get('api_key', False) + cvssv2 = None + cvssv3 = None + for vuln in vulnerabilities: - current_vuln = { - "name": vuln[0], - "spec": vuln[1], - "version": vuln[2], - "advisory": vuln[3], - "vuln_id": vuln[4], - "cvssv2": None, - "cvssv3": None - } - vulnerable.append( - Vulnerability(**current_vuln) - ) - return vulnerable + vuln['pkg'] = pkgs.get(vuln.get('package_name', None)) + XVE_ID = vuln.get('CVE', None) # Trying to get first the CVE ID + + severity = vuln.get('severity', None) + if severity and severity.get('source', False): + cvssv2 = severity.get('cvssv2', None) + cvssv3 = severity.get('cvssv3', None) + # Trying to get the PVE ID if it exists, otherwise it will be the same CVE ID of above + XVE_ID = severity.get('source', False) + vuln['severity'] = Severity(source=XVE_ID, cvssv2=cvssv2, cvssv3=cvssv3) + else: + vuln['severity'] = None + + ignored_expires = vuln.get('ignored_expires', None) + + if ignored_expires: + vuln['ignored_expires'] = validate_expiration_date(ignored_expires) + + vuln['CVE'] = CVE(name=XVE_ID, cvssv2=cvssv2, cvssv3=cvssv3) if XVE_ID else None + + vulnerable.append(Vulnerability(**vuln)) + + return vulnerable, remediations, found_packages -def get_licenses(key, db_mirror, cached, proxy): +@sync_safety_context +def get_licenses(key=False, db_mirror=False, cached=0, proxy=None, telemetry=True): key = key if key else os.environ.get("SAFETY_API_KEY", False) if not key and not db_mirror: - raise InvalidKeyError("The API-KEY was not provided.") + raise InvalidKeyError(message="The API-KEY was not provided.") if db_mirror: mirrors = [db_mirror] else: @@ -209,9 +510,78 @@ def get_licenses(key, db_mirror, cached, proxy): for 
mirror in mirrors: # mirror can either be a local path or a URL if mirror.startswith("http://") or mirror.startswith("https://"): - licenses = fetch_database_url(mirror, db_name=db_name, key=key, cached=cached, proxy=proxy) + licenses = fetch_database_url(mirror, db_name=db_name, key=key, cached=cached, proxy=proxy, + telemetry=telemetry) else: licenses = fetch_database_file(mirror, db_name=db_name) if licenses: return licenses raise DatabaseFetchError() + + +def get_announcements(key, proxy, telemetry=True): + LOG.info('Getting announcements') + + body = build_telemetry_data(telemetry=telemetry) + + announcements = [] + headers = {} + + if key: + headers["X-Api-Key"] = key + + url = f"{API_BASE_URL}announcements/" + + LOG.debug(f'Telemetry body sent: {body}') + + try: + r = session.post(url=url, json=body, headers=headers, timeout=2, proxies=proxy) + LOG.debug(r.text) + except Exception as e: + LOG.info('Unexpected but HANDLED Exception happened getting the announcements: %s', e) + return announcements + + if r.status_code == 200: + try: + announcements = r.json() + if 'announcements' in announcements.keys(): + announcements = announcements.get('announcements', []) + else: + LOG.info('There is not announcements key in the JSON response, is this a wrong structure?') + announcements = [] + + except json.JSONDecodeError as e: + LOG.info('Unexpected but HANDLED Exception happened decoding the announcement response: %s', e) + + LOG.info('Announcements fetched') + + return announcements + + +def get_packages(files=False, stdin=False): + + if files: + return list(itertools.chain.from_iterable(read_requirements(f, resolve=True) for f in files)) + + if stdin: + return list(read_requirements(sys.stdin)) + + import pkg_resources + + return [ + Package(name=d.key, version=d.version, found=d.location, insecure_versions=[], secure_versions=[], + latest_version=None, latest_version_without_known_vulnerabilities=None, more_info_url=None) for d in + pkg_resources.working_set + if d.key not in {"python", "wsgiref", "argparse"} + ] + + +def read_vulnerabilities(fh): + try: + data = json.load(fh) + except json.JSONDecodeError as e: + raise MalformedDatabase(reason=e, fetched_from=fh.name) + except TypeError as e: + raise MalformedDatabase(reason=e, fetched_from=fh.name) + + return data diff --git a/pipenv/patched/safety/util.py b/pipenv/patched/safety/util.py index 46afa91591..cf4847fe77 100644 --- a/pipenv/patched/safety/util.py +++ b/pipenv/patched/safety/util.py @@ -1,17 +1,26 @@ -from pipenv.vendor.dparse.parser import setuptools_parse_requirements_backport as _parse_requirements -from collections import namedtuple -from pipenv.patched.pip._vendor.packaging.version import parse as parse_version -import pipenv.vendor.click as click -import sys -import json +import logging import os -Package = namedtuple("Package", ["key", "version"]) -RequirementFile = namedtuple("RequirementFile", ["path"]) +import platform + +import sys +from datetime import datetime +from difflib import SequenceMatcher +from threading import Lock +from typing import List +import pipenv.vendor.click as click +from pipenv.vendor.click import BadParameter +from pipenv.vendor.dparse.parser import setuptools_parse_requirements_backport as _parse_requirements +from pipenv.patched.pip._vendor.packaging.utils import canonicalize_name +from pipenv.patched.pip._vendor.packaging.version import parse as parse_version +from pipenv.vendor.ruamel.yaml import YAML +from pipenv.vendor.ruamel.yaml.error import MarkedYAMLError +import 
pipenv.patched.safety as safety -def read_vulnerabilities(fh): - return json.load(fh) +from pipenv.patched.safety.constants import EXIT_CODE_FAILURE, EXIT_CODE_OK +from pipenv.patched.safety.models import Package, RequirementFile +LOG = logging.getLogger(__name__) def iter_lines(fh, lineno=0): for line in fh.readlines()[lineno:]: @@ -90,7 +99,10 @@ def read_requirements(fh, resolve=False): req, = parse_line(parseable_line) if len(req.specifier._specs) == 1 and \ next(iter(req.specifier._specs))._spec[0] == "==": - yield Package(key=req.name, version=next(iter(req.specifier._specs))._spec[1]) + yield Package(name=req.name, version=next(iter(req.specifier._specs))._spec[1], + found='temp_file' if is_temp_file else fh.name, insecure_versions=[], + secure_versions=[], latest_version=None, + latest_version_without_known_vulnerabilities=None, more_info_url=None) else: try: fname = fh.name @@ -108,15 +120,11 @@ def read_requirements(fh, resolve=False): continue -def get_proxy_dict(proxyprotocol, proxyhost, proxyport): - proxy_dictionary = {} - if proxyhost is not None: - if proxyprotocol in ["http", "https"]: - proxy_dictionary = {proxyprotocol: "{0}://{1}:{2}".format(proxyprotocol, proxyhost, str(proxyport))} - else: - click.secho("Proxy Protocol should be http or https only.", fg="red") - sys.exit(-1) - return proxy_dictionary +def get_proxy_dict(proxy_protocol, proxy_host, proxy_port): + if proxy_protocol and proxy_host and proxy_port: + # Safety only uses https request, so only https dict will be passed to requests + return {'https': f"{proxy_protocol}://{proxy_host}:{str(proxy_port)}"} + return None def get_license_name_by_id(license_id, db): @@ -126,13 +134,525 @@ def get_license_name_by_id(license_id, db): return name return None -def get_packages_licenses(packages, licenses_db): - """Get the licenses for the specified packages based on their version. 
+ +def get_flags_from_context(): + flags = {} + context = click.get_current_context(silent=True) + + if context: + for option in context.command.params: + flags_per_opt = option.opts + option.secondary_opts + for flag in flags_per_opt: + flags[flag] = option.name + + return flags + + +def get_used_options(): + flags = get_flags_from_context() + used_options = {} + + for arg in sys.argv: + cleaned_arg = arg if '=' not in arg else arg.split('=')[0] + if cleaned_arg in flags: + option_used = flags.get(cleaned_arg) + + if option_used in used_options: + used_options[option_used][cleaned_arg] = used_options[option_used].get(cleaned_arg, 0) + 1 + else: + used_options[option_used] = {cleaned_arg: 1} + + return used_options + + +def get_safety_version(): + from pipenv.patched.safety import VERSION + return VERSION + + +def get_primary_announcement(announcements): + for announcement in announcements: + if announcement.get('type', '').lower() == 'primary_announcement': + try: + from pipenv.patched.safety.output_utils import build_primary_announcement + build_primary_announcement(announcement, columns=80) + except Exception as e: + LOG.debug(f'Failed to build primary announcement: {str(e)}') + return None + + return announcement + + return None + + +def get_basic_announcements(announcements): + return [announcement for announcement in announcements if + announcement.get('type', '').lower() != 'primary_announcement'] + + +def build_telemetry_data(telemetry=True): + context = SafetyContext() + + body = { + 'os_type': os.environ.get("SAFETY_OS_TYPE", None) or platform.system(), + 'os_release': os.environ.get("SAFETY_OS_RELEASE", None) or platform.release(), + 'os_description': os.environ.get("SAFETY_OS_DESCRIPTION", None) or platform.platform(), + 'python_version': platform.python_version(), + 'safety_command': context.command, + 'safety_options': get_used_options() + } if telemetry else {} + + body['safety_version'] = get_safety_version() + body['safety_source'] = os.environ.get("SAFETY_SOURCE", None) or context.safety_source + + LOG.debug(f'Telemetry body built: {body}') + + return body + + +def build_git_data(): + import subprocess + + try: + is_git = subprocess.run(["git", "rev-parse", "--is-inside-work-tree"], stdout=subprocess.PIPE).stdout.decode('utf-8').strip() + except Exception: + is_git = False + + if is_git == "true": + result = { + "branch": "", + "tag": "", + "commit": "", + "dirty": "", + "origin": "" + } + + try: + result['branch'] = subprocess.run(["git", "symbolic-ref", "--short", "-q", "HEAD"], stdout=subprocess.PIPE).stdout.decode('utf-8').strip() + result['tag'] = subprocess.run(["git", "describe", "--tags", "--exact-match"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout.decode('utf-8').strip() + + commit = subprocess.run(["git", "describe", '--match=""', '--always', '--abbrev=40', '--dirty'], stdout=subprocess.PIPE).stdout.decode('utf-8').strip() + result['dirty'] = commit.endswith('-dirty') + result['commit'] = commit.split("-dirty")[0] + + result['origin'] = subprocess.run(["git", "remote", "get-url", "origin"], stdout=subprocess.PIPE).stdout.decode('utf-8').strip() + except Exception: + pass + + return result + else: + return { + "error": "not-git-repo" + } + + +def output_exception(exception, exit_code_output=True): + click.secho(str(exception), fg="red", file=sys.stderr) + + if exit_code_output: + exit_code = EXIT_CODE_FAILURE + if hasattr(exception, 'get_exit_code'): + exit_code = exception.get_exit_code() + else: + exit_code = EXIT_CODE_OK + + sys.exit(exit_code) 
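+
+
+# Expected call pattern (illustrative only; 'SafetyError' stands in for any
+# exception from safety.errors that implements get_exit_code()):
+#
+#     try:
+#         ...  # any safety operation
+#     except SafetyError as e:
+#         output_exception(e)  # styles the message to stderr, then exits with
+#                              # e.get_exit_code(), or EXIT_CODE_FAILURE when
+#                              # the exception does not define get_exit_code()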
+ + +def get_processed_options(policy_file, ignore, ignore_severity_rules, exit_code): + if policy_file: + security = policy_file.get('security', {}) + source = click.get_current_context().get_parameter_source("exit_code") + + if not ignore: + ignore = security.get('ignore-vulnerabilities', {}) + if source == click.core.ParameterSource.DEFAULT: + exit_code = not security.get('continue-on-vulnerability-error', False) + ignore_cvss_below = security.get('ignore-cvss-severity-below', 0.0) + ignore_cvss_unknown = security.get('ignore-cvss-unknown-severity', False) + ignore_severity_rules = {'ignore-cvss-severity-below': ignore_cvss_below, + 'ignore-cvss-unknown-severity': ignore_cvss_unknown} + + return ignore, ignore_severity_rules, exit_code + + +class MutuallyExclusiveOption(click.Option): + def __init__(self, *args, **kwargs): + self.mutually_exclusive = set(kwargs.pop('mutually_exclusive', [])) + self.with_values = kwargs.pop('with_values', {}) + help = kwargs.get('help', '') + if self.mutually_exclusive: + ex_str = ', '.join(["{0} with values {1}".format(item, self.with_values.get(item)) if item in self.with_values else item for item in self.mutually_exclusive]) + kwargs['help'] = help + ( + ' NOTE: This argument is mutually exclusive with ' + ' arguments: [' + ex_str + '].' + ) + super(MutuallyExclusiveOption, self).__init__(*args, **kwargs) + + def handle_parse_result(self, ctx, opts, args): + m_exclusive_used = self.mutually_exclusive.intersection(opts) + option_used = m_exclusive_used and self.name in opts + + exclusive_value_used = False + for used in m_exclusive_used: + value_used = opts.get(used, None) + if not isinstance(value_used, List): + value_used = [value_used] + if value_used and set(self.with_values.get(used, [])).intersection(value_used): + exclusive_value_used = True + + if option_used and (not self.with_values or exclusive_value_used): + options = ', '.join(self.opts) + prohibited = ''.join(["\n * --{0} with {1}".format(item, self.with_values.get( + item)) if item in self.with_values else f"\n * {item}" for item in self.mutually_exclusive]) + raise click.UsageError( + f"Illegal usage: `{options}` is mutually exclusive with: {prohibited}" + ) + + return super(MutuallyExclusiveOption, self).handle_parse_result( + ctx, + opts, + args + ) + + +class DependentOption(click.Option): + def __init__(self, *args, **kwargs): + self.required_options = set(kwargs.pop('required_options', [])) + help = kwargs.get('help', '') + if self.required_options: + ex_str = ', '.join(self.required_options) + kwargs['help'] = help + ( + ' NOTE: This argument requires the following flags ' + ' [' + ex_str + '].' 
+            )
+        super(DependentOption, self).__init__(*args, **kwargs)
+
+    def handle_parse_result(self, ctx, opts, args):
+        # compute the set of required options that were not supplied; the
+        # join() below needs an iterable of names, not a boolean
+        missing_required_arguments = self.required_options.difference(opts)
+
+        if missing_required_arguments and self.name in opts:
+            raise click.UsageError(
+                "Illegal usage: `{}` needs the "
+                "arguments `{}`.".format(
+                    self.name,
+                    ', '.join(missing_required_arguments)
+                )
+            )
+
+        return super(DependentOption, self).handle_parse_result(
+            ctx,
+            opts,
+            args
+        )
+
+
+def transform_ignore(ctx, param, value):
+    if isinstance(value, tuple):
+        return dict(zip(value, [{'reason': '', 'expires': None} for _ in range(len(value))]))
+
+    return {}
+
+
+def active_color_if_needed(ctx, param, value):
+    if value == 'screen':
+        ctx.color = True
+
+    color = os.environ.get("SAFETY_COLOR", None)
+
+    if color is not None:
+        ctx.color = bool(color)
+
+    return value
+
+
+def json_alias(ctx, param, value):
+    if value:
+        os.environ['SAFETY_OUTPUT'] = 'json'
+        return value
+
+
+def bare_alias(ctx, param, value):
+    if value:
+        os.environ['SAFETY_OUTPUT'] = 'bare'
+        return value
+
+
+def get_terminal_size():
+    from shutil import get_terminal_size as t_size
+    # get_terminal_size can report (0, 0) when run from a pseudo-terminal on Python versions prior to 3.11
+
+    columns = t_size().columns or 80
+    lines = t_size().lines or 24
+
+    return os.terminal_size((columns, lines))
+
+
+def validate_expiration_date(expiration_date):
+    d = None
+
+    if expiration_date:
+        try:
+            d = datetime.strptime(expiration_date, '%Y-%m-%d')
+        except ValueError:
+            pass
+
+        try:
+            d = datetime.strptime(expiration_date, '%Y-%m-%d %H:%M:%S')
+        except ValueError:
+            pass
+
+    return d
+
+
+class SafetyPolicyFile(click.ParamType):
+    """
+    Custom click parameter type that loads and validates a Safety policy file
+    """
+
+    name = "filename"
+    envvar_list_splitter = os.path.pathsep
+
+    def __init__(
+        self,
+        mode: str = "r",
+        encoding: str = None,
+        errors: str = "strict",
+    ) -> None:
+        self.mode = mode
+        self.encoding = encoding
+        self.errors = errors
+        self.basic_msg = '\n' + click.style('Unable to load the Safety Policy file "{name}".', fg='red')
+
+    def to_info_dict(self):
+        info_dict = super().to_info_dict()
+        info_dict.update(mode=self.mode, encoding=self.encoding)
+        return info_dict
+
+    def fail_if_unrecognized_keys(self, used_keys, valid_keys, param=None, ctx=None, msg='{hint}', context_hint=''):
+        for keyword in used_keys:
+            if keyword not in valid_keys:
+                match = None
+                max_ratio = 0.0
+                if isinstance(keyword, str):
+                    for option in valid_keys:
+                        ratio = SequenceMatcher(None, keyword, option).ratio()
+                        if ratio > max_ratio:
+                            match = option
+                            max_ratio = ratio
+
+                maybe_msg = f' Maybe you meant: {match}' if max_ratio > 0.7 else \
+                    f' Valid keywords in this level are: {", ".join(valid_keys)}'
+
+                self.fail(msg.format(hint=f'{context_hint}"{keyword}" is not a valid keyword.{maybe_msg}'), param, ctx)
+
+    def fail_if_wrong_bool_value(self, keyword, value, msg='{hint}'):
+        if value is not None and not isinstance(value, bool):
+            self.fail(msg.format(hint=f"'{keyword}' value needs to be a boolean. 
" + "You can use True, False, TRUE, FALSE, true or false")) + + def convert(self, value, param, ctx): + try: + + if hasattr(value, "read") or hasattr(value, "write"): + return value + + msg = self.basic_msg.format(name=value) + '\n' + click.style('HINT:', fg='yellow') + ' {hint}' + + f, should_close = click.types.open_stream( + value, self.mode, self.encoding, self.errors, atomic=False + ) + filename = '' + + try: + raw = f.read() + yaml = YAML(typ='safe', pure=False) + safety_policy = yaml.load(raw) + filename = f.name + except Exception as e: + show_parsed_hint = isinstance(e, MarkedYAMLError) + hint = str(e) + if show_parsed_hint: + hint = f'{str(e.problem).strip()} {str(e.context).strip()} {str(e.context_mark).strip()}' + + self.fail(msg.format(name=value, hint=hint), param, ctx) + + if not safety_policy or not isinstance(safety_policy, dict) or not safety_policy.get('security', None): + self.fail( + msg.format(hint='you are missing the security root tag'), param, ctx) + + security_config = safety_policy.get('security', {}) + security_keys = ['ignore-cvss-severity-below', 'ignore-cvss-unknown-severity', 'ignore-vulnerabilities', + 'continue-on-vulnerability-error'] + used_keys = security_config.keys() + + self.fail_if_unrecognized_keys(used_keys, security_keys, param=param, ctx=ctx, msg=msg, + context_hint='"security" -> ') + + ignore_cvss_security_below = security_config.get('ignore-cvss-severity-below', None) + + if ignore_cvss_security_below: + limit = 0.0 + + try: + limit = float(ignore_cvss_security_below) + except ValueError as e: + self.fail(msg.format(hint="'ignore-cvss-severity-below' value needs to be an integer or float.")) + + if limit < 0 or limit > 10: + self.fail(msg.format(hint="'ignore-cvss-severity-below' needs to be a value between 0 and 10")) + + continue_on_vulnerability_error = security_config.get('continue-on-vulnerability-error', None) + self.fail_if_wrong_bool_value('continue-on-vulnerability-error', continue_on_vulnerability_error, msg) + + ignore_cvss_unknown_severity = security_config.get('ignore-cvss-unknown-severity', None) + self.fail_if_wrong_bool_value('ignore-cvss-unknown-severity', ignore_cvss_unknown_severity, msg) + + ignore_vulns = safety_policy.get('security', {}).get('ignore-vulnerabilities', {}) + + if ignore_vulns: + if not isinstance(ignore_vulns, dict): + self.fail(msg.format(hint="Vulnerability IDs under the 'ignore-vulnerabilities' key, need to " + "follow the convention 'ID_NUMBER:', probably you are missing a colon.")) + + normalized = {} + + for ignored_vuln_id, config in ignore_vulns.items(): + ignored_vuln_config = config if config else {} + + if not isinstance(ignored_vuln_config, dict): + self.fail( + msg.format(hint=f"Wrong configuration under the vulnerability with ID: {ignored_vuln_id}")) + + context_msg = f'"security" -> "ignore-vulnerabilities" -> "{ignored_vuln_id}" -> ' + + self.fail_if_unrecognized_keys(ignored_vuln_config.keys(), ['reason', 'expires'], param=param, + ctx=ctx, msg=msg, context_hint=context_msg) + + reason = ignored_vuln_config.get('reason', '') + reason = str(reason) if reason else None + expires = ignored_vuln_config.get('expires', '') + expires = str(expires) if expires else None + + try: + if int(ignored_vuln_id) < 0: + raise ValueError('Negative Vulnerability ID') + except ValueError as e: + self.fail(msg.format( + hint=f"vulnerability id {ignored_vuln_id} under the 'ignore-vulnerabilities' root needs to " + f"be a positive integer") + ) + + # Validate expires + d = validate_expiration_date(expires) + + 
if expires and not d: + self.fail(msg.format(hint=f"{context_msg}expires: \"{expires}\" isn't a valid format " + f"for the expires keyword, " + "valid options are: YYYY-MM-DD or " + "YYYY-MM-DD HH:MM:SS") + ) + + normalized[str(ignored_vuln_id)] = {'reason': reason, 'expires': d} + + safety_policy['security']['ignore-vulnerabilities'] = normalized + safety_policy['filename'] = filename + safety_policy['raw'] = raw + else: + safety_policy['security']['ignore-vulnerabilities'] = {} + + return safety_policy + except BadParameter as expected_e: + raise expected_e + except Exception as e: + # Don't fail in the default case + if ctx and isinstance(e, OSError): + source = ctx.get_parameter_source("policy_file") + if e.errno == 2 and source == click.core.ParameterSource.DEFAULT and value == '.safety-policy.yml': + return None + + problem = click.style("Policy file YAML is not valid.") + hint = click.style("HINT: ", fg='yellow') + str(e) + self.fail(f"{problem}\n{hint}", param, ctx) + + def shell_complete( + self, ctx: "Context", param: "Parameter", incomplete: str + ): + """Return a special completion marker that tells the completion + system to use the shell to provide file path completions. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from pipenv.vendor.click.shell_completion import CompletionItem + + return [CompletionItem(incomplete, type="file")] + + +class SingletonMeta(type): + + _instances = {} + + _lock = Lock() + + def __call__(cls, *args, **kwargs): + with cls._lock: + if cls not in cls._instances: + instance = super().__call__(*args, **kwargs) + cls._instances[cls] = instance + return cls._instances[cls] + + +class SafetyContext(metaclass=SingletonMeta): + packages = None + key = False + db_mirror = False + cached = None + ignore_vulns = None + ignore_severity_rules = None + proxy = None + include_ignored = False + telemetry = None + files = None + stdin = None + is_env_scan = None + command = None + review = None + params = {} + safety_source = 'code' + + +def sync_safety_context(f): + def new_func(*args, **kwargs): + ctx = SafetyContext() + + for attr in dir(ctx): + if attr in kwargs: + setattr(ctx, attr, kwargs.get(attr)) + + return f(*args, **kwargs) + + return new_func + + +@sync_safety_context +def get_packages_licenses(packages=None, licenses_db=None): + """Get the licenses for the specified packages based on their version. :param packages: packages list :param licenses_db: the licenses db in the raw form. :return: list of objects with the packages and their respectives licenses. """ + SafetyContext().command = 'license' + + if not packages: + packages = [] + if not licenses_db: + licenses_db = {} + packages_licenses_db = licenses_db.get('packages', {}) filtered_packages_licenses = [] @@ -141,7 +661,7 @@ def get_packages_licenses(packages, licenses_db): if isinstance(pkg, RequirementFile): continue # normalize the package name - pkg_name = pkg.key.replace("_", "-").lower() + pkg_name = canonicalize_name(pkg.name) # packages may have different licenses depending their version. 
pkg_licenses = packages_licenses_db.get(pkg_name, []) version_requested = parse_version(pkg.version) @@ -160,7 +680,7 @@ def get_packages_licenses(packages, licenses_db): if license_id: license_name = get_license_name_by_id(license_id, licenses_db) if not license_id or not license_name: - license_name = "N/A" + license_name = "unknown" filtered_packages_licenses.append({ "package": pkg_name, diff --git a/pipenv/vendor/ruamel.yaml-0.17.21-py3.9-nspkg.pth b/pipenv/vendor/ruamel.yaml-0.17.21-py3.9-nspkg.pth new file mode 100644 index 0000000000..68e19a260a --- /dev/null +++ b/pipenv/vendor/ruamel.yaml-0.17.21-py3.9-nspkg.pth @@ -0,0 +1 @@ +import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('ruamel',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('ruamel', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('ruamel', [os.path.dirname(p)])));m = m or sys.modules.setdefault('ruamel', types.ModuleType('ruamel'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p) diff --git a/pipenv/vendor/ruamel.yaml-0.17.21-py3.9-nspkg.pth.LICENSE b/pipenv/vendor/ruamel.yaml-0.17.21-py3.9-nspkg.pth.LICENSE new file mode 100644 index 0000000000..678f5cc0e0 --- /dev/null +++ b/pipenv/vendor/ruamel.yaml-0.17.21-py3.9-nspkg.pth.LICENSE @@ -0,0 +1,21 @@ + The MIT License (MIT) + + Copyright (c) 2014-2022 Anthon van der Neut, Ruamel bvba + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. diff --git a/pipenv/vendor/ruamel.yaml.LICENSE b/pipenv/vendor/ruamel.yaml.LICENSE new file mode 100644 index 0000000000..678f5cc0e0 --- /dev/null +++ b/pipenv/vendor/ruamel.yaml.LICENSE @@ -0,0 +1,21 @@ + The MIT License (MIT) + + Copyright (c) 2014-2022 Anthon van der Neut, Ruamel bvba + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. diff --git a/pipenv/vendor/ruamel/yaml/__init__.py b/pipenv/vendor/ruamel/yaml/__init__.py new file mode 100644 index 0000000000..accbfb5a10 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/__init__.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +if False: # MYPY + from typing import Dict, Any # NOQA + +_package_data = dict( + full_package_name='ruamel.yaml', + version_info=(0, 17, 21), + __version__='0.17.21', + version_timestamp='2022-02-12 09:49:22', + author='Anthon van der Neut', + author_email='a.van.der.neut@ruamel.eu', + description='ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order', # NOQA + entry_points=None, + since=2014, + extras_require={ + ':platform_python_implementation=="CPython" and python_version<"3.11"': ['ruamel.yaml.clib>=0.2.6'], # NOQA + 'jinja2': ['ruamel.yaml.jinja2>=0.2'], + 'docs': ['ryd'], + }, + classifiers=[ + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: Implementation :: CPython', + 'Topic :: Software Development :: Libraries :: Python Modules', + 'Topic :: Text Processing :: Markup', + 'Typing :: Typed', + ], + keywords='yaml 1.2 parser round-trip preserve quotes order config', + read_the_docs='yaml', + supported=[(3, 5)], # minimum + tox=dict( + env='*f', # f for 3.5 + fl8excl='_test/lib', + ), + # universal=True, + python_requires='>=3', + rtfd='yaml', +) # type: Dict[Any, Any] + + +version_info = _package_data['version_info'] +__version__ = _package_data['__version__'] + +try: + from .cyaml import * # NOQA + + __with_libyaml__ = True +except (ImportError, ValueError): # for Jython + __with_libyaml__ = False + +from pipenv.vendor.ruamel.yaml.main import * # NOQA diff --git a/pipenv/vendor/ruamel/yaml/anchor.py b/pipenv/vendor/ruamel/yaml/anchor.py new file mode 100644 index 0000000000..1deea78412 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/anchor.py @@ -0,0 +1,20 @@ +# coding: utf-8 +if False: # MYPY + from typing import Any, Dict, Optional, List, Union, Optional, Iterator # NOQA + +anchor_attrib = '_yaml_anchor' + + +class Anchor: + __slots__ = 'value', 'always_dump' + attrib = anchor_attrib + + def __init__(self): + # type: () -> None + self.value = None + self.always_dump = False + + def __repr__(self): + # type: () -> Any + ad = ', (always dump)' if self.always_dump else "" + return 'Anchor({!r}{})'.format(self.value, ad) diff --git a/pipenv/vendor/ruamel/yaml/comments.py b/pipenv/vendor/ruamel/yaml/comments.py new file mode 100644 index 0000000000..4c61452873 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/comments.py @@ -0,0 +1,1267 @@ +# coding: utf-8 + +""" +stuff to deal with comments and formatting on dict/list/ordereddict/set +these are not really related, formatting could be factored out as +a separate base +""" + 
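The containers defined in this module are what give the vendored ruamel.yaml its round-trip behavior: comments, anchors, flow style, and key order survive a load/dump cycle. A minimal sketch of that behavior, using the pipenv.vendor.ruamel.yaml import path that this patch uses throughout:

# Sketch only: exercises the round-trip ('rt') mode backed by the
# CommentedMap/CommentedSeq classes defined in this module.
import sys
from pipenv.vendor.ruamel.yaml import YAML

yaml = YAML()  # default typ='rt' builds CommentedMap/CommentedSeq instances
data = yaml.load("a: 1  # keep me\nb: [2, 3]\n")
data['a'] = 10
yaml.dump(data, sys.stdout)
# emits "a: 10  # keep me" with the comment and the flow-style list preserved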
+import sys +import copy + + +from pipenv.vendor.ruamel.yaml.compat import ordereddict +from pipenv.vendor.ruamel.yaml.compat import MutableSliceableSequence, _F, nprintf # NOQA +from pipenv.vendor.ruamel.yaml.scalarstring import ScalarString +from pipenv.vendor.ruamel.yaml.anchor import Anchor + +from collections.abc import MutableSet, Sized, Set, Mapping + +if False: # MYPY + from typing import Any, Dict, Optional, List, Union, Optional, Iterator # NOQA + +# fmt: off +__all__ = ['CommentedSeq', 'CommentedKeySeq', + 'CommentedMap', 'CommentedOrderedMap', + 'CommentedSet', 'comment_attrib', 'merge_attrib', + 'C_POST', 'C_PRE', 'C_SPLIT_ON_FIRST_BLANK', 'C_BLANK_LINE_PRESERVE_SPACE', + ] +# fmt: on + +# splitting of comments by the scanner +# an EOLC (End-Of-Line Comment) is preceded by some token +# an FLC (Full Line Comment) is a comment not preceded by a token, i.e. # is +# the first non-blank on line +# a BL is a blank line i.e. empty or spaces/tabs only +# bits 0 and 1 are combined, you can choose only one +C_POST = 0b00 +C_PRE = 0b01 +C_SPLIT_ON_FIRST_BLANK = 0b10 # as C_POST, but if blank line then C_PRE all lines before +# first blank goes to POST even if no following real FLC +# (first blank -> first of post) +# 0b11 -> reserved for future use +C_BLANK_LINE_PRESERVE_SPACE = 0b100 +# C_EOL_PRESERVE_SPACE2 = 0b1000 + + +class IDX: + # temporary auto increment, so rearranging is easier + def __init__(self): + # type: () -> None + self._idx = 0 + + def __call__(self): + # type: () -> Any + x = self._idx + self._idx += 1 + return x + + def __str__(self): + # type: () -> Any + return str(self._idx) + + +cidx = IDX() + +# more or less in order of subjective expected likelyhood +# the _POST and _PRE ones are lists themselves +C_VALUE_EOL = C_ELEM_EOL = cidx() +C_KEY_EOL = cidx() +C_KEY_PRE = C_ELEM_PRE = cidx() # not this is not value +C_VALUE_POST = C_ELEM_POST = cidx() # not this is not value +C_VALUE_PRE = cidx() +C_KEY_POST = cidx() +C_TAG_EOL = cidx() +C_TAG_POST = cidx() +C_TAG_PRE = cidx() +C_ANCHOR_EOL = cidx() +C_ANCHOR_POST = cidx() +C_ANCHOR_PRE = cidx() + + +comment_attrib = '_yaml_comment' +format_attrib = '_yaml_format' +line_col_attrib = '_yaml_line_col' +merge_attrib = '_yaml_merge' +tag_attrib = '_yaml_tag' + + +class Comment: + # using sys.getsize tested the Comment objects, __slots__ makes them bigger + # and adding self.end did not matter + __slots__ = 'comment', '_items', '_post', '_pre' + attrib = comment_attrib + + def __init__(self, old=True): + # type: (bool) -> None + self._pre = None if old else [] # type: ignore + self.comment = None # [post, [pre]] + # map key (mapping/omap/dict) or index (sequence/list) to a list of + # dict: post_key, pre_key, post_value, pre_value + # list: pre item, post item + self._items = {} # type: Dict[Any, Any] + # self._start = [] # should not put these on first item + self._post = [] # type: List[Any] # end of document comments + + def __str__(self): + # type: () -> str + if bool(self._post): + end = ',\n end=' + str(self._post) + else: + end = "" + return 'Comment(comment={0},\n items={1}{2})'.format(self.comment, self._items, end) + + def _old__repr__(self): + # type: () -> str + if bool(self._post): + end = ',\n end=' + str(self._post) + else: + end = "" + try: + ln = max([len(str(k)) for k in self._items]) + 1 + except ValueError: + ln = '' # type: ignore + it = ' '.join( + ['{:{}} {}\n'.format(str(k) + ':', ln, v) for k, v in self._items.items()] + ) + if it: + it = '\n ' + it + ' ' + return 'Comment(\n start={},\n 
items={{{}}}{})'.format(self.comment, it, end) + + def __repr__(self): + # type: () -> str + if self._pre is None: + return self._old__repr__() + if bool(self._post): + end = ',\n end=' + repr(self._post) + else: + end = "" + try: + ln = max([len(str(k)) for k in self._items]) + 1 + except ValueError: + ln = '' # type: ignore + it = ' '.join( + ['{:{}} {}\n'.format(str(k) + ':', ln, v) for k, v in self._items.items()] + ) + if it: + it = '\n ' + it + ' ' + return 'Comment(\n pre={},\n items={{{}}}{})'.format(self.pre, it, end) + + @property + def items(self): + # type: () -> Any + return self._items + + @property + def end(self): + # type: () -> Any + return self._post + + @end.setter + def end(self, value): + # type: (Any) -> None + self._post = value + + @property + def pre(self): + # type: () -> Any + return self._pre + + @pre.setter + def pre(self, value): + # type: (Any) -> None + self._pre = value + + def get(self, item, pos): + # type: (Any, Any) -> Any + x = self._items.get(item) + if x is None or len(x) < pos: + return None + return x[pos] # can be None + + def set(self, item, pos, value): + # type: (Any, Any, Any) -> Any + x = self._items.get(item) + if x is None: + self._items[item] = x = [None] * (pos + 1) + else: + while len(x) <= pos: + x.append(None) + assert x[pos] is None + x[pos] = value + + def __contains__(self, x): + # type: (Any) -> Any + # test if a substring is in any of the attached comments + if self.comment: + if self.comment[0] and x in self.comment[0].value: + return True + if self.comment[1]: + for c in self.comment[1]: + if x in c.value: + return True + for value in self.items.values(): + if not value: + continue + for c in value: + if c and x in c.value: + return True + if self.end: + for c in self.end: + if x in c.value: + return True + return False + + +# to distinguish key from None +def NoComment(): + # type: () -> None + pass + + +class Format: + __slots__ = ('_flow_style',) + attrib = format_attrib + + def __init__(self): + # type: () -> None + self._flow_style = None # type: Any + + def set_flow_style(self): + # type: () -> None + self._flow_style = True + + def set_block_style(self): + # type: () -> None + self._flow_style = False + + def flow_style(self, default=None): + # type: (Optional[Any]) -> Any + """if default (the flow_style) is None, the flow style tacked on to + the object explicitly will be taken. 
If that is None as well the + default flow style rules the format down the line, or the type + of the constituent values (simple -> flow, map/list -> block)""" + if self._flow_style is None: + return default + return self._flow_style + + +class LineCol: + """ + line and column information wrt document, values start at zero (0) + """ + + attrib = line_col_attrib + + def __init__(self): + # type: () -> None + self.line = None + self.col = None + self.data = None # type: Optional[Dict[Any, Any]] + + def add_kv_line_col(self, key, data): + # type: (Any, Any) -> None + if self.data is None: + self.data = {} + self.data[key] = data + + def key(self, k): + # type: (Any) -> Any + return self._kv(k, 0, 1) + + def value(self, k): + # type: (Any) -> Any + return self._kv(k, 2, 3) + + def _kv(self, k, x0, x1): + # type: (Any, Any, Any) -> Any + if self.data is None: + return None + data = self.data[k] + return data[x0], data[x1] + + def item(self, idx): + # type: (Any) -> Any + if self.data is None: + return None + return self.data[idx][0], self.data[idx][1] + + def add_idx_line_col(self, key, data): + # type: (Any, Any) -> None + if self.data is None: + self.data = {} + self.data[key] = data + + def __repr__(self): + # type: () -> str + return _F('LineCol({line}, {col})', line=self.line, col=self.col) # type: ignore + + +class Tag: + """store tag information for roundtripping""" + + __slots__ = ('value',) + attrib = tag_attrib + + def __init__(self): + # type: () -> None + self.value = None + + def __repr__(self): + # type: () -> Any + return '{0.__class__.__name__}({0.value!r})'.format(self) + + +class CommentedBase: + @property + def ca(self): + # type: () -> Any + if not hasattr(self, Comment.attrib): + setattr(self, Comment.attrib, Comment()) + return getattr(self, Comment.attrib) + + def yaml_end_comment_extend(self, comment, clear=False): + # type: (Any, bool) -> None + if comment is None: + return + if clear or self.ca.end is None: + self.ca.end = [] + self.ca.end.extend(comment) + + def yaml_key_comment_extend(self, key, comment, clear=False): + # type: (Any, Any, bool) -> None + r = self.ca._items.setdefault(key, [None, None, None, None]) + if clear or r[1] is None: + if comment[1] is not None: + assert isinstance(comment[1], list) + r[1] = comment[1] + else: + r[1].extend(comment[0]) + r[0] = comment[0] + + def yaml_value_comment_extend(self, key, comment, clear=False): + # type: (Any, Any, bool) -> None + r = self.ca._items.setdefault(key, [None, None, None, None]) + if clear or r[3] is None: + if comment[1] is not None: + assert isinstance(comment[1], list) + r[3] = comment[1] + else: + r[3].extend(comment[0]) + r[2] = comment[0] + + def yaml_set_start_comment(self, comment, indent=0): + # type: (Any, Any) -> None + """overwrites any preceding comment lines on an object + expects comment to be without `#` and possible have multiple lines + """ + from .error import CommentMark + from .tokens import CommentToken + + pre_comments = self._yaml_clear_pre_comment() # type: ignore + if comment[-1] == '\n': + comment = comment[:-1] # strip final newline if there + start_mark = CommentMark(indent) + for com in comment.split('\n'): + c = com.strip() + if len(c) > 0 and c[0] != '#': + com = '# ' + com + pre_comments.append(CommentToken(com + '\n', start_mark)) + + def yaml_set_comment_before_after_key( + self, key, before=None, indent=0, after=None, after_indent=None + ): + # type: (Any, Any, Any, Any, Any) -> None + """ + expects comment (before/after) to be without `#` and possible have multiple 
lines + """ + from pipenv.vendor.ruamel.yaml.error import CommentMark + from pipenv.vendor.ruamel.yaml.tokens import CommentToken + + def comment_token(s, mark): + # type: (Any, Any) -> Any + # handle empty lines as having no comment + return CommentToken(('# ' if s else "") + s + '\n', mark) + + if after_indent is None: + after_indent = indent + 2 + if before and (len(before) > 1) and before[-1] == '\n': + before = before[:-1] # strip final newline if there + if after and after[-1] == '\n': + after = after[:-1] # strip final newline if there + start_mark = CommentMark(indent) + c = self.ca.items.setdefault(key, [None, [], None, None]) + if before is not None: + if c[1] is None: + c[1] = [] + if before == '\n': + c[1].append(comment_token("", start_mark)) # type: ignore + else: + for com in before.split('\n'): + c[1].append(comment_token(com, start_mark)) # type: ignore + if after: + start_mark = CommentMark(after_indent) + if c[3] is None: + c[3] = [] + for com in after.split('\n'): + c[3].append(comment_token(com, start_mark)) # type: ignore + + @property + def fa(self): + # type: () -> Any + """format attribute + + set_flow_style()/set_block_style()""" + if not hasattr(self, Format.attrib): + setattr(self, Format.attrib, Format()) + return getattr(self, Format.attrib) + + def yaml_add_eol_comment(self, comment, key=NoComment, column=None): + # type: (Any, Optional[Any], Optional[Any]) -> None + """ + there is a problem as eol comments should start with ' #' + (but at the beginning of the line the space doesn't have to be before + the #. The column index is for the # mark + """ + from .tokens import CommentToken + from .error import CommentMark + + if column is None: + try: + column = self._yaml_get_column(key) + except AttributeError: + column = 0 + if comment[0] != '#': + comment = '# ' + comment + if column is None: + if comment[0] == '#': + comment = ' ' + comment + column = 0 + start_mark = CommentMark(column) + ct = [CommentToken(comment, start_mark), None] + self._yaml_add_eol_comment(ct, key=key) + + @property + def lc(self): + # type: () -> Any + if not hasattr(self, LineCol.attrib): + setattr(self, LineCol.attrib, LineCol()) + return getattr(self, LineCol.attrib) + + def _yaml_set_line_col(self, line, col): + # type: (Any, Any) -> None + self.lc.line = line + self.lc.col = col + + def _yaml_set_kv_line_col(self, key, data): + # type: (Any, Any) -> None + self.lc.add_kv_line_col(key, data) + + def _yaml_set_idx_line_col(self, key, data): + # type: (Any, Any) -> None + self.lc.add_idx_line_col(key, data) + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + return None + return self.anchor + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + @property + def tag(self): + # type: () -> Any + if not hasattr(self, Tag.attrib): + setattr(self, Tag.attrib, Tag()) + return getattr(self, Tag.attrib) + + def yaml_set_tag(self, value): + # type: (Any) -> None + self.tag.value = value + + def copy_attributes(self, t, memo=None): + # type: (Any, Any) -> None + # fmt: off + for a in [Comment.attrib, Format.attrib, LineCol.attrib, Anchor.attrib, + Tag.attrib, merge_attrib]: + if hasattr(self, a): + if memo is not None: + setattr(t, a, copy.deepcopy(getattr(self, a, memo))) + else: + 
setattr(t, a, getattr(self, a)) + # fmt: on + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + raise NotImplementedError + + def _yaml_get_pre_comment(self): + # type: () -> Any + raise NotImplementedError + + def _yaml_get_column(self, key): + # type: (Any) -> Any + raise NotImplementedError + + +class CommentedSeq(MutableSliceableSequence, list, CommentedBase): # type: ignore + __slots__ = (Comment.attrib, '_lst') + + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + list.__init__(self, *args, **kw) + + def __getsingleitem__(self, idx): + # type: (Any) -> Any + return list.__getitem__(self, idx) + + def __setsingleitem__(self, idx, value): + # type: (Any, Any) -> None + # try to preserve the scalarstring type if setting an existing key to a new value + if idx < len(self): + if ( + isinstance(value, str) + and not isinstance(value, ScalarString) + and isinstance(self[idx], ScalarString) + ): + value = type(self[idx])(value) + list.__setitem__(self, idx, value) + + def __delsingleitem__(self, idx=None): + # type: (Any) -> Any + list.__delitem__(self, idx) + self.ca.items.pop(idx, None) # might not be there -> default value + for list_index in sorted(self.ca.items): + if list_index < idx: + continue + self.ca.items[list_index - 1] = self.ca.items.pop(list_index) + + def __len__(self): + # type: () -> int + return list.__len__(self) + + def insert(self, idx, val): + # type: (Any, Any) -> None + """the comments after the insertion have to move forward""" + list.insert(self, idx, val) + for list_index in sorted(self.ca.items, reverse=True): + if list_index < idx: + break + self.ca.items[list_index + 1] = self.ca.items.pop(list_index) + + def extend(self, val): + # type: (Any) -> None + list.extend(self, val) + + def __eq__(self, other): + # type: (Any) -> bool + return list.__eq__(self, other) + + def _yaml_add_comment(self, comment, key=NoComment): + # type: (Any, Optional[Any]) -> None + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + self._yaml_add_comment(comment, key=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][0].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post = key - 1, key + 1 + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for row_idx, _k1 in enumerate(self): + if row_idx >= key: + break + if row_idx not in self.ca.items: + continue + sel_idx = row_idx + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + pre_comments = self.ca.comment[1] + return pre_comments + + def _yaml_clear_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + def __deepcopy__(self, memo): + # type: (Any) -> Any + res = self.__class__() + memo[id(self)] = res + for k in self: + res.append(copy.deepcopy(k, memo)) + self.copy_attributes(res, memo=memo) + return res + + def __add__(self, other): + # type: (Any) -> Any + return list.__add__(self, other) + + def 
sort(self, key=None, reverse=False): + # type: (Any, bool) -> None + if key is None: + tmp_lst = sorted(zip(self, range(len(self))), reverse=reverse) + list.__init__(self, [x[0] for x in tmp_lst]) + else: + tmp_lst = sorted( + zip(map(key, list.__iter__(self)), range(len(self))), reverse=reverse + ) + list.__init__(self, [list.__getitem__(self, x[1]) for x in tmp_lst]) + itm = self.ca.items + self.ca._items = {} + for idx, x in enumerate(tmp_lst): + old_index = x[1] + if old_index in itm: + self.ca.items[idx] = itm[old_index] + + def __repr__(self): + # type: () -> Any + return list.__repr__(self) + + +class CommentedKeySeq(tuple, CommentedBase): # type: ignore + """This primarily exists to be able to roundtrip keys that are sequences""" + + def _yaml_add_comment(self, comment, key=NoComment): + # type: (Any, Optional[Any]) -> None + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + self._yaml_add_comment(comment, key=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][0].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post = key - 1, key + 1 + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for row_idx, _k1 in enumerate(self): + if row_idx >= key: + break + if row_idx not in self.ca.items: + continue + sel_idx = row_idx + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + pre_comments = self.ca.comment[1] + return pre_comments + + def _yaml_clear_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + +class CommentedMapView(Sized): + __slots__ = ('_mapping',) + + def __init__(self, mapping): + # type: (Any) -> None + self._mapping = mapping + + def __len__(self): + # type: () -> int + count = len(self._mapping) + return count + + +class CommentedMapKeysView(CommentedMapView, Set): # type: ignore + __slots__ = () + + @classmethod + def _from_iterable(self, it): + # type: (Any) -> Any + return set(it) + + def __contains__(self, key): + # type: (Any) -> Any + return key in self._mapping + + def __iter__(self): + # type: () -> Any # yield from self._mapping # not in py27, pypy + # for x in self._mapping._keys(): + for x in self._mapping: + yield x + + +class CommentedMapItemsView(CommentedMapView, Set): # type: ignore + __slots__ = () + + @classmethod + def _from_iterable(self, it): + # type: (Any) -> Any + return set(it) + + def __contains__(self, item): + # type: (Any) -> Any + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v == value + + def __iter__(self): + # type: () -> Any + for key in self._mapping._keys(): + yield (key, self._mapping[key]) + + +class CommentedMapValuesView(CommentedMapView): + __slots__ = () + + def __contains__(self, value): + # type: (Any) -> Any + for key in self._mapping: + if value == self._mapping[key]: + return True + return False + + def __iter__(self): + # type: () -> Any + for key in self._mapping._keys(): + yield 
self._mapping[key] + + +class CommentedMap(ordereddict, CommentedBase): + __slots__ = (Comment.attrib, '_ok', '_ref') + + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + self._ok = set() # type: MutableSet[Any] # own keys + self._ref = [] # type: List[CommentedMap] + ordereddict.__init__(self, *args, **kw) + + def _yaml_add_comment(self, comment, key=NoComment, value=NoComment): + # type: (Any, Optional[Any], Optional[Any]) -> None + """values is set to key to indicate a value attachment of comment""" + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + return + if value is not NoComment: + self.yaml_value_comment_extend(value, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + """add on the value line, with value specified by the key""" + self._yaml_add_comment(comment, value=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][2].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post, last = None, None, None + for x in self: + if pre is not None and x != key: + post = x + break + if x == key: + pre = last + last = x + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for k1 in self: + if k1 >= key: + break + if k1 not in self.ca.items: + continue + sel_idx = k1 + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + pre_comments = self.ca.comment[1] + return pre_comments + + def _yaml_clear_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + def update(self, *vals, **kw): + # type: (Any, Any) -> None + try: + ordereddict.update(self, *vals, **kw) + except TypeError: + # probably a dict that is used + for x in vals[0]: + self[x] = vals[0][x] + if vals: + try: + self._ok.update(vals[0].keys()) # type: ignore + except AttributeError: + # assume one argument that is a list/tuple of two element lists/tuples + for x in vals[0]: + self._ok.add(x[0]) + if kw: + self._ok.add(*kw.keys()) + + def insert(self, pos, key, value, comment=None): + # type: (Any, Any, Any, Optional[Any]) -> None + """insert key value into given position + attach comment if provided + """ + keys = list(self.keys()) + [key] + ordereddict.insert(self, pos, key, value) + for keytmp in keys: + self._ok.add(keytmp) + for referer in self._ref: + for keytmp in keys: + referer.update_key_value(keytmp) + if comment is not None: + self.yaml_add_eol_comment(comment, key=key) + + def mlget(self, key, default=None, list_ok=False): + # type: (Any, Any, Any) -> Any + """multi-level get that expects dicts within dicts""" + if not isinstance(key, list): + return self.get(key, default) + # assume that the key is a list of recursively accessible dicts + + def get_one_level(key_list, level, d): + # type: (Any, Any, Any) -> Any + if not list_ok: + assert isinstance(d, dict) + if level >= len(key_list): + if level > len(key_list): + raise IndexError + return d[key_list[level - 1]] + return get_one_level(key_list, level + 1, d[key_list[level - 1]]) + + try: + return get_one_level(key, 
1, self) + except KeyError: + return default + except (TypeError, IndexError): + if not list_ok: + raise + return default + + def __getitem__(self, key): + # type: (Any) -> Any + try: + return ordereddict.__getitem__(self, key) + except KeyError: + for merged in getattr(self, merge_attrib, []): + if key in merged[1]: + return merged[1][key] + raise + + def __setitem__(self, key, value): + # type: (Any, Any) -> None + # try to preserve the scalarstring type if setting an existing key to a new value + if key in self: + if ( + isinstance(value, str) + and not isinstance(value, ScalarString) + and isinstance(self[key], ScalarString) + ): + value = type(self[key])(value) + ordereddict.__setitem__(self, key, value) + self._ok.add(key) + + def _unmerged_contains(self, key): + # type: (Any) -> Any + if key in self._ok: + return True + return None + + def __contains__(self, key): + # type: (Any) -> bool + return bool(ordereddict.__contains__(self, key)) + + def get(self, key, default=None): + # type: (Any, Any) -> Any + try: + return self.__getitem__(key) + except: # NOQA + return default + + def __repr__(self): + # type: () -> Any + return ordereddict.__repr__(self).replace('CommentedMap', 'ordereddict') + + def non_merged_items(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + if x in self._ok: + yield x, ordereddict.__getitem__(self, x) + + def __delitem__(self, key): + # type: (Any) -> None + # for merged in getattr(self, merge_attrib, []): + # if key in merged[1]: + # value = merged[1][key] + # break + # else: + # # not found in merged in stuff + # ordereddict.__delitem__(self, key) + # for referer in self._ref: + # referer.update=_key_value(key) + # return + # + # ordereddict.__setitem__(self, key, value) # merge might have different value + # self._ok.discard(key) + self._ok.discard(key) + ordereddict.__delitem__(self, key) + for referer in self._ref: + referer.update_key_value(key) + + def __iter__(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield x + + def _keys(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield x + + def __len__(self): + # type: () -> int + return int(ordereddict.__len__(self)) + + def __eq__(self, other): + # type: (Any) -> bool + return bool(dict(self) == other) + + def keys(self): + # type: () -> Any + return CommentedMapKeysView(self) + + def values(self): + # type: () -> Any + return CommentedMapValuesView(self) + + def _items(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield x, ordereddict.__getitem__(self, x) + + def items(self): + # type: () -> Any + return CommentedMapItemsView(self) + + @property + def merge(self): + # type: () -> Any + if not hasattr(self, merge_attrib): + setattr(self, merge_attrib, []) + return getattr(self, merge_attrib) + + def copy(self): + # type: () -> Any + x = type(self)() # update doesn't work + for k, v in self._items(): + x[k] = v + self.copy_attributes(x) + return x + + def add_referent(self, cm): + # type: (Any) -> None + if cm not in self._ref: + self._ref.append(cm) + + def add_yaml_merge(self, value): + # type: (Any) -> None + for v in value: + v[1].add_referent(self) + for k, v in v[1].items(): + if ordereddict.__contains__(self, k): + continue + ordereddict.__setitem__(self, k, v) + self.merge.extend(value) + + def update_key_value(self, key): + # type: (Any) -> None + if key in self._ok: + return + for v in self.merge: + if key in v[1]: + ordereddict.__setitem__(self, key, v[1][key]) + return + ordereddict.__delitem__(self, key) + + def 
__deepcopy__(self, memo): + # type: (Any) -> Any + res = self.__class__() + memo[id(self)] = res + for k in self: + res[k] = copy.deepcopy(self[k], memo) + self.copy_attributes(res, memo=memo) + return res + + +# based on brownie mappings +@classmethod # type: ignore +def raise_immutable(cls, *args, **kwargs): + # type: (Any, *Any, **Any) -> None + raise TypeError('{} objects are immutable'.format(cls.__name__)) + + +class CommentedKeyMap(CommentedBase, Mapping): # type: ignore + __slots__ = Comment.attrib, '_od' + """This primarily exists to be able to roundtrip keys that are mappings""" + + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + if hasattr(self, '_od'): + raise_immutable(self) + try: + self._od = ordereddict(*args, **kw) + except TypeError: + raise + + __delitem__ = __setitem__ = clear = pop = popitem = setdefault = update = raise_immutable + + # need to implement __getitem__, __iter__ and __len__ + def __getitem__(self, index): + # type: (Any) -> Any + return self._od[index] + + def __iter__(self): + # type: () -> Iterator[Any] + for x in self._od.__iter__(): + yield x + + def __len__(self): + # type: () -> int + return len(self._od) + + def __hash__(self): + # type: () -> Any + return hash(tuple(self.items())) + + def __repr__(self): + # type: () -> Any + if not hasattr(self, merge_attrib): + return self._od.__repr__() + return 'ordereddict(' + repr(list(self._od.items())) + ')' + + @classmethod + def fromkeys(keys, v=None): + # type: (Any, Any) -> Any + return CommentedKeyMap(dict.fromkeys(keys, v)) + + def _yaml_add_comment(self, comment, key=NoComment): + # type: (Any, Optional[Any]) -> None + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + self._yaml_add_comment(comment, key=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][0].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post = key - 1, key + 1 + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for row_idx, _k1 in enumerate(self): + if row_idx >= key: + break + if row_idx not in self.ca.items: + continue + sel_idx = row_idx + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + +class CommentedOrderedMap(CommentedMap): + __slots__ = (Comment.attrib,) + + +class CommentedSet(MutableSet, CommentedBase): # type: ignore # NOQA + __slots__ = Comment.attrib, 'odict' + + def __init__(self, values=None): + # type: (Any) -> None + self.odict = ordereddict() + MutableSet.__init__(self) + if values is not None: + self |= values # type: ignore + + def _yaml_add_comment(self, comment, key=NoComment, value=NoComment): + # type: (Any, Optional[Any], Optional[Any]) -> None + """values is set to key to indicate a value attachment of comment""" + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + return + if value is not NoComment: + self.yaml_value_comment_extend(value, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + """add on the 
value line, with value specified by the key""" + self._yaml_add_comment(comment, value=key) + + def add(self, value): + # type: (Any) -> None + """Add an element.""" + self.odict[value] = None + + def discard(self, value): + # type: (Any) -> None + """Remove an element. Do not raise an exception if absent.""" + del self.odict[value] + + def __contains__(self, x): + # type: (Any) -> Any + return x in self.odict + + def __iter__(self): + # type: () -> Any + for x in self.odict: + yield x + + def __len__(self): + # type: () -> int + return len(self.odict) + + def __repr__(self): + # type: () -> str + return 'set({0!r})'.format(self.odict.keys()) + + +class TaggedScalar(CommentedBase): + # the value and style attributes are set during roundtrip construction + def __init__(self, value=None, style=None, tag=None): + # type: (Any, Any, Any) -> None + self.value = value + self.style = style + if tag is not None: + self.yaml_set_tag(tag) + + def __str__(self): + # type: () -> Any + return self.value + + +def dump_comments(d, name="", sep='.', out=sys.stdout): + # type: (Any, str, str, Any) -> None + """ + recursively dump comments, all but the toplevel preceded by the path + in dotted form x.0.a + """ + if isinstance(d, dict) and hasattr(d, 'ca'): + if name: + out.write('{} {}\n'.format(name, type(d))) + out.write('{!r}\n'.format(d.ca)) # type: ignore + for k in d: + dump_comments(d[k], name=(name + sep + str(k)) if name else k, sep=sep, out=out) + elif isinstance(d, list) and hasattr(d, 'ca'): + if name: + out.write('{} {}\n'.format(name, type(d))) + out.write('{!r}\n'.format(d.ca)) # type: ignore + for idx, k in enumerate(d): + dump_comments( + k, name=(name + sep + str(idx)) if name else str(idx), sep=sep, out=out + ) diff --git a/pipenv/vendor/ruamel/yaml/compat.py b/pipenv/vendor/ruamel/yaml/compat.py new file mode 100644 index 0000000000..a954cf2ad0 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/compat.py @@ -0,0 +1,268 @@ +# coding: utf-8 + +# partially from package six by Benjamin Peterson + +import sys +import os +import io +import traceback +from abc import abstractmethod +import collections.abc + + +# fmt: off +if False: # MYPY + from typing import Any, Dict, Optional, List, Union, BinaryIO, IO, Text, Tuple # NOQA + from typing import Optional # NOQA +# fmt: on + +_DEFAULT_YAML_VERSION = (1, 2) + +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict # type: ignore + + # to get the right name import ... 
as ordereddict doesn't do that + + +class ordereddict(OrderedDict): # type: ignore + if not hasattr(OrderedDict, 'insert'): + + def insert(self, pos, key, value): + # type: (int, Any, Any) -> None + if pos >= len(self): + self[key] = value + return + od = ordereddict() + od.update(self) + for k in od: + del self[k] + for index, old_key in enumerate(od): + if pos == index: + self[key] = value + self[old_key] = od[old_key] + + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + + +# replace with f-strings when 3.5 support is dropped +# ft = '42' +# assert _F('abc {ft!r}', ft=ft) == 'abc %r' % ft +# 'abc %r' % ft -> _F('abc {ft!r}' -> f'abc {ft!r}' +def _F(s, *superfluous, **kw): + # type: (Any, Any, Any) -> Any + if superfluous: + raise TypeError + return s.format(**kw) + + +StringIO = io.StringIO +BytesIO = io.BytesIO + +if False: # MYPY + # StreamType = Union[BinaryIO, IO[str], IO[unicode], StringIO] + # StreamType = Union[BinaryIO, IO[str], StringIO] # type: ignore + StreamType = Any + + StreamTextType = StreamType # Union[Text, StreamType] + VersionType = Union[List[int], str, Tuple[int, int]] + +builtins_module = 'builtins' + + +def with_metaclass(meta, *bases): + # type: (Any, Any) -> Any + """Create a base class with a metaclass.""" + return meta('NewBase', bases, {}) + + +DBG_TOKEN = 1 +DBG_EVENT = 2 +DBG_NODE = 4 + + +_debug = None # type: Optional[int] +if 'RUAMELDEBUG' in os.environ: + _debugx = os.environ.get('RUAMELDEBUG') + if _debugx is None: + _debug = 0 + else: + _debug = int(_debugx) + + +if bool(_debug): + + class ObjectCounter: + def __init__(self): + # type: () -> None + self.map = {} # type: Dict[Any, Any] + + def __call__(self, k): + # type: (Any) -> None + self.map[k] = self.map.get(k, 0) + 1 + + def dump(self): + # type: () -> None + for k in sorted(self.map): + sys.stdout.write('{} -> {}'.format(k, self.map[k])) + + object_counter = ObjectCounter() + + +# used from yaml util when testing +def dbg(val=None): + # type: (Any) -> Any + global _debug + if _debug is None: + # set to true or false + _debugx = os.environ.get('YAMLDEBUG') + if _debugx is None: + _debug = 0 + else: + _debug = int(_debugx) + if val is None: + return _debug + return _debug & val + + +class Nprint: + def __init__(self, file_name=None): + # type: (Any) -> None + self._max_print = None # type: Any + self._count = None # type: Any + self._file_name = file_name + + def __call__(self, *args, **kw): + # type: (Any, Any) -> None + if not bool(_debug): + return + out = sys.stdout if self._file_name is None else open(self._file_name, 'a') + dbgprint = print # to fool checking for print statements by dv utility + kw1 = kw.copy() + kw1['file'] = out + dbgprint(*args, **kw1) + out.flush() + if self._max_print is not None: + if self._count is None: + self._count = self._max_print + self._count -= 1 + if self._count == 0: + dbgprint('forced exit\n') + traceback.print_stack() + out.flush() + sys.exit(0) + if self._file_name: + out.close() + + def set_max_print(self, i): + # type: (int) -> None + self._max_print = i + self._count = None + + def fp(self, mode='a'): + # type: (str) -> Any + out = sys.stdout if self._file_name is None else open(self._file_name, mode) + return out + + +nprint = Nprint() +nprintf = Nprint('/var/tmp/ruamel.yaml.log') + +# char checkers following production rules + + +def check_namespace_char(ch): + # type: (Any) -> bool + if '\x21' <= ch <= '\x7E': # ! 
to ~
+        return True
+    if '\xA0' <= ch <= '\uD7FF':
+        return True
+    if ('\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':  # excl. byte order mark
+        return True
+    if '\U00010000' <= ch <= '\U0010FFFF':
+        return True
+    return False
+
+
+def check_anchorname_char(ch):
+    # type: (Any) -> bool
+    if ch in ',[]{}':
+        return False
+    return check_namespace_char(ch)
+
+
+def version_tnf(t1, t2=None):
+    # type: (Any, Any) -> Any
+    """
+    return True if ruamel.yaml version_info < t1; None if t2 is specified
+    and version_info < t2; otherwise False
+    """
+    from pipenv.vendor.ruamel.yaml import version_info  # NOQA
+
+    if version_info < t1:
+        return True
+    if t2 is not None and version_info < t2:
+        return None
+    return False
+
+
+class MutableSliceableSequence(collections.abc.MutableSequence):  # type: ignore
+    __slots__ = ()
+
+    def __getitem__(self, index):
+        # type: (Any) -> Any
+        if not isinstance(index, slice):
+            return self.__getsingleitem__(index)
+        return type(self)([self[i] for i in range(*index.indices(len(self)))])  # type: ignore
+
+    def __setitem__(self, index, value):
+        # type: (Any, Any) -> None
+        if not isinstance(index, slice):
+            return self.__setsingleitem__(index, value)
+        assert iter(value)
+        # nprint(index.start, index.stop, index.step, index.indices(len(self)))
+        if index.step is None:
+            del self[index.start : index.stop]
+            for elem in reversed(value):
+                self.insert(0 if index.start is None else index.start, elem)
+        else:
+            range_parms = index.indices(len(self))
+            nr_assigned_items = (range_parms[1] - range_parms[0] - 1) // range_parms[2] + 1
+            # need to test before changing, in case TypeError is caught
+            if nr_assigned_items < len(value):
+                raise TypeError(
+                    'too many elements in value {} < {}'.format(nr_assigned_items, len(value))
+                )
+            elif nr_assigned_items > len(value):
+                raise TypeError(
+                    'not enough elements in value {} > {}'.format(
+                        nr_assigned_items, len(value)
+                    )
+                )
+            for idx, i in enumerate(range(*range_parms)):
+                self[i] = value[idx]
+
+    def __delitem__(self, index):
+        # type: (Any) -> None
+        if not isinstance(index, slice):
+            return self.__delsingleitem__(index)
+        # nprint(index.start, index.stop, index.step, index.indices(len(self)))
+        for i in reversed(range(*index.indices(len(self)))):
+            del self[i]
+
+    @abstractmethod
+    def __getsingleitem__(self, index):
+        # type: (Any) -> Any
+        raise IndexError
+
+    @abstractmethod
+    def __setsingleitem__(self, index, value):
+        # type: (Any, Any) -> None
+        raise IndexError
+
+    @abstractmethod
+    def __delsingleitem__(self, index):
+        # type: (Any) -> None
+        raise IndexError
diff --git a/pipenv/vendor/ruamel/yaml/composer.py b/pipenv/vendor/ruamel/yaml/composer.py
new file mode 100644
index 0000000000..da9da6675a
--- /dev/null
+++ b/pipenv/vendor/ruamel/yaml/composer.py
@@ -0,0 +1,243 @@
+# coding: utf-8
+
+import warnings
+
+from pipenv.vendor.ruamel.yaml.error import MarkedYAMLError, ReusedAnchorWarning
+from pipenv.vendor.ruamel.yaml.compat import _F, nprint, nprintf  # NOQA
+
+from pipenv.vendor.ruamel.yaml.events import (
+    StreamStartEvent,
+    StreamEndEvent,
+    MappingStartEvent,
+    MappingEndEvent,
+    SequenceStartEvent,
+    SequenceEndEvent,
+    AliasEvent,
+    ScalarEvent,
+)
+from pipenv.vendor.ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List  # NOQA
+
+__all__ = ['Composer', 'ComposerError']
+
+
+class ComposerError(MarkedYAMLError):
+    pass
+
+
+class Composer:
+    def __init__(self, loader=None):
+        # type: (Any) -> None
+        self.loader = loader
+        if self.loader is not None and getattr(self.loader, '_composer', None) is None:
+            self.loader._composer = self
+        self.anchors = {}  # type: Dict[Any, Any]
+
+    @property
+    def parser(self):
+        # type: () -> Any
+        if hasattr(self.loader, 'typ'):
+            self.loader.parser
+        return self.loader._parser
+
+    @property
+    def resolver(self):
+        # type: () -> Any
+        # assert self.loader._resolver is not None
+        if hasattr(self.loader, 'typ'):
+            self.loader.resolver
+        return self.loader._resolver
+
+    def check_node(self):
+        # type: () -> Any
+        # Drop the STREAM-START event.
+        if self.parser.check_event(StreamStartEvent):
+            self.parser.get_event()
+
+        # Are there more documents available?
+        return not self.parser.check_event(StreamEndEvent)
+
+    def get_node(self):
+        # type: () -> Any
+        # Get the root node of the next document.
+        if not self.parser.check_event(StreamEndEvent):
+            return self.compose_document()
+
+    def get_single_node(self):
+        # type: () -> Any
+        # Drop the STREAM-START event.
+        self.parser.get_event()
+
+        # Compose a document if the stream is not empty.
+        document = None  # type: Any
+        if not self.parser.check_event(StreamEndEvent):
+            document = self.compose_document()
+
+        # Ensure that the stream contains no more documents.
+        if not self.parser.check_event(StreamEndEvent):
+            event = self.parser.get_event()
+            raise ComposerError(
+                'expected a single document in the stream',
+                document.start_mark,
+                'but found another document',
+                event.start_mark,
+            )
+
+        # Drop the STREAM-END event.
+        self.parser.get_event()
+
+        return document
+
+    def compose_document(self):
+        # type: () -> Any
+        # Drop the DOCUMENT-START event.
+        self.parser.get_event()
+
+        # Compose the root node.
+        node = self.compose_node(None, None)
+
+        # Drop the DOCUMENT-END event.
+        self.parser.get_event()
+
+        self.anchors = {}
+        return node
+
+    def return_alias(self, a):
+        # type: (Any) -> Any
+        return a
+
+    def compose_node(self, parent, index):
+        # type: (Any, Any) -> Any
+        if self.parser.check_event(AliasEvent):
+            event = self.parser.get_event()
+            alias = event.anchor
+            if alias not in self.anchors:
+                raise ComposerError(
+                    None,
+                    None,
+                    _F('found undefined alias {alias!r}', alias=alias),
+                    event.start_mark,
+                )
+            return self.return_alias(self.anchors[alias])
+        event = self.parser.peek_event()
+        anchor = event.anchor
+        if anchor is not None:  # have an anchor
+            if anchor in self.anchors:
+                # raise ComposerError(
+                #     "found duplicate anchor %r; first occurrence"
+                #     % (anchor), self.anchors[anchor].start_mark,
+                #     "second occurrence", event.start_mark)
+                ws = (
+                    '\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence '
+                    '{}'.format((anchor), self.anchors[anchor].start_mark, event.start_mark)
+                )
+                warnings.warn(ws, ReusedAnchorWarning)
+        self.resolver.descend_resolver(parent, index)
+        if self.parser.check_event(ScalarEvent):
+            node = self.compose_scalar_node(anchor)
+        elif self.parser.check_event(SequenceStartEvent):
+            node = self.compose_sequence_node(anchor)
+        elif self.parser.check_event(MappingStartEvent):
+            node = self.compose_mapping_node(anchor)
+        self.resolver.ascend_resolver()
+        return node
+
+    def compose_scalar_node(self, anchor):
+        # type: (Any) -> Any
+        event = self.parser.get_event()
+        tag = event.tag
+        if tag is None or tag == '!':
+            tag = self.resolver.resolve(ScalarNode, event.value, event.implicit)
+        node = ScalarNode(
+            tag,
+            event.value,
+            event.start_mark,
+            event.end_mark,
+            style=event.style,
+            comment=event.comment,
+            anchor=anchor,
+        )
+        if anchor is not None:
+            self.anchors[anchor] = node
+        return node
+
+    def compose_sequence_node(self, anchor):
+        # type: (Any) -> Any
+        start_event = self.parser.get_event()
+        tag = start_event.tag
+        if tag is None or tag == '!':
+            tag = self.resolver.resolve(SequenceNode, None, start_event.implicit)
+        node = SequenceNode(
+            tag,
+            [],
+            start_event.start_mark,
+            None,
+            flow_style=start_event.flow_style,
+            comment=start_event.comment,
+            anchor=anchor,
+        )
+        if anchor is not None:
+            self.anchors[anchor] = node
+        index = 0
+        while not self.parser.check_event(SequenceEndEvent):
+            node.value.append(self.compose_node(node, index))
+            index += 1
+        end_event = self.parser.get_event()
+        if node.flow_style is True and end_event.comment is not None:
+            if node.comment is not None:
+                nprint(
+                    'Warning: unexpected end_event comment in sequence '
+                    'node {}'.format(node.flow_style)
+                )
+            node.comment = end_event.comment
+        node.end_mark = end_event.end_mark
+        self.check_end_doc_comment(end_event, node)
+        return node
+
+    def compose_mapping_node(self, anchor):
+        # type: (Any) -> Any
+        start_event = self.parser.get_event()
+        tag = start_event.tag
+        if tag is None or tag == '!':
+            tag = self.resolver.resolve(MappingNode, None, start_event.implicit)
+        node = MappingNode(
+            tag,
+            [],
+            start_event.start_mark,
+            None,
+            flow_style=start_event.flow_style,
+            comment=start_event.comment,
+            anchor=anchor,
+        )
+        if anchor is not None:
+            self.anchors[anchor] = node
+        while not self.parser.check_event(MappingEndEvent):
+            # key_event = self.parser.peek_event()
+            item_key = self.compose_node(node, None)
+            # if item_key in node.value:
+            #     raise ComposerError("while composing a mapping",
+            #             start_event.start_mark,
+            #             "found duplicate key", key_event.start_mark)
+            item_value = self.compose_node(node, item_key)
+            # node.value[item_key] = item_value
+            node.value.append((item_key, item_value))
+        end_event = self.parser.get_event()
+        if node.flow_style is True and end_event.comment is not None:
+            node.comment = end_event.comment
+        node.end_mark = end_event.end_mark
+        self.check_end_doc_comment(end_event, node)
+        return node
+
+    def check_end_doc_comment(self, end_event, node):
+        # type: (Any, Any) -> None
+        if end_event.comment and end_event.comment[1]:
+            # pre comments on an end_event, no following to move to
+            if node.comment is None:
+                node.comment = [None, None]
+            assert not isinstance(node, ScalarEvent)
+            # this is a post comment on a mapping node, add as third element
+            # in the list
+            node.comment.append(end_event.comment[1])
+            end_event.comment[1] = None
diff --git a/pipenv/vendor/ruamel/yaml/configobjwalker.py b/pipenv/vendor/ruamel/yaml/configobjwalker.py
new file mode 100644
index 0000000000..e6fe578e1f
--- /dev/null
+++ b/pipenv/vendor/ruamel/yaml/configobjwalker.py
@@ -0,0 +1,14 @@
+# coding: utf-8
+
+import warnings
+
+from pipenv.vendor.ruamel.yaml.util import configobj_walker as new_configobj_walker
+
+if False:  # MYPY
+    from typing import Any  # NOQA
+
+
+def configobj_walker(cfg):
+    # type: (Any) -> Any
+    warnings.warn('configobj_walker has moved to ruamel.yaml.util, please update your code')
+    return new_configobj_walker(cfg)
diff --git a/pipenv/vendor/ruamel/yaml/constructor.py b/pipenv/vendor/ruamel/yaml/constructor.py
new file mode 100644
index 0000000000..c066a0fe58
--- /dev/null
+++ b/pipenv/vendor/ruamel/yaml/constructor.py
@@ -0,0 +1,1845 @@
+# coding: utf-8
+
+import datetime
+import base64
+import binascii
+import sys
+import types
+import warnings
+from collections.abc import Hashable, MutableSequence, MutableMapping
+
+# fmt: off
+from
pipenv.vendor.ruamel.yaml.error import (MarkedYAMLError, MarkedYAMLFutureWarning, + MantissaNoDotYAML1_1Warning) +from pipenv.vendor.ruamel.yaml.nodes import * # NOQA +from pipenv.vendor.ruamel.yaml.nodes import (SequenceNode, MappingNode, ScalarNode) +from pipenv.vendor.ruamel.yaml.compat import (_F, builtins_module, # NOQA + nprint, nprintf, version_tnf) +from pipenv.vendor.ruamel.yaml.compat import ordereddict + +from pipenv.vendor.ruamel.yaml.comments import * # NOQA +from pipenv.vendor.ruamel.yaml.comments import (CommentedMap, CommentedOrderedMap, CommentedSet, + CommentedKeySeq, CommentedSeq, TaggedScalar, + CommentedKeyMap, + C_KEY_PRE, C_KEY_EOL, C_KEY_POST, + C_VALUE_PRE, C_VALUE_EOL, C_VALUE_POST, + ) +from pipenv.vendor.ruamel.yaml.scalarstring import (SingleQuotedScalarString, DoubleQuotedScalarString, + LiteralScalarString, FoldedScalarString, + PlainScalarString, ScalarString,) +from pipenv.vendor.ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt +from pipenv.vendor.ruamel.yaml.scalarfloat import ScalarFloat +from pipenv.vendor.ruamel.yaml.scalarbool import ScalarBoolean +from pipenv.vendor.ruamel.yaml.timestamp import TimeStamp +from pipenv.vendor.ruamel.yaml.util import timestamp_regexp, create_timestamp + +if False: # MYPY + from typing import Any, Dict, List, Set, Generator, Union, Optional # NOQA + + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError', 'RoundTripConstructor'] +# fmt: on + + +class ConstructorError(MarkedYAMLError): + pass + + +class DuplicateKeyFutureWarning(MarkedYAMLFutureWarning): + pass + + +class DuplicateKeyError(MarkedYAMLError): + pass + + +class BaseConstructor: + + yaml_constructors = {} # type: Dict[Any, Any] + yaml_multi_constructors = {} # type: Dict[Any, Any] + + def __init__(self, preserve_quotes=None, loader=None): + # type: (Optional[bool], Any) -> None + self.loader = loader + if self.loader is not None and getattr(self.loader, '_constructor', None) is None: + self.loader._constructor = self + self.loader = loader + self.yaml_base_dict_type = dict + self.yaml_base_list_type = list + self.constructed_objects = {} # type: Dict[Any, Any] + self.recursive_objects = {} # type: Dict[Any, Any] + self.state_generators = [] # type: List[Any] + self.deep_construct = False + self._preserve_quotes = preserve_quotes + self.allow_duplicate_keys = version_tnf((0, 15, 1), (0, 16)) + + @property + def composer(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.composer + try: + return self.loader._composer + except AttributeError: + sys.stdout.write('slt {}\n'.format(type(self))) + sys.stdout.write('slc {}\n'.format(self.loader._composer)) + sys.stdout.write('{}\n'.format(dir(self))) + raise + + @property + def resolver(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.resolver + return self.loader._resolver + + @property + def scanner(self): + # type: () -> Any + # needed to get to the expanded comments + if hasattr(self.loader, 'typ'): + return self.loader.scanner + return self.loader._scanner + + def check_data(self): + # type: () -> Any + # If there are more documents available? + return self.composer.check_node() + + def get_data(self): + # type: () -> Any + # Construct and return the next document. + if self.composer.check_node(): + return self.construct_document(self.composer.get_node()) + + def get_single_data(self): + # type: () -> Any + # Ensure that the stream contains a single document and construct it. 
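+        # get_single_node() drops the STREAM-START event, composes at most one
+        # document, and raises ComposerError if a second document follows, so
+        # an empty stream falls through to the explicit 'return None' below.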
+ node = self.composer.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + # type: (Any) -> Any + data = self.construct_object(node) + while bool(self.state_generators): + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for _dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + return self.recursive_objects[node] + # raise ConstructorError( + # None, None, 'found unconstructable recursive node', node.start_mark + # ) + self.recursive_objects[node] = None + data = self.construct_non_recursive_object(node) + + self.constructed_objects[node] = data + del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_non_recursive_object(self, node, tag=None): + # type: (Any, Optional[str]) -> Any + constructor = None # type: Any + tag_suffix = None + if tag is None: + tag = node.tag + if tag in self.yaml_constructors: + constructor = self.yaml_constructors[tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if tag.startswith(tag_prefix): + tag_suffix = tag[len(tag_prefix) :] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = next(generator) + if self.deep_construct: + for _dummy in generator: + pass + else: + self.state_generators.append(generator) + return data + + def construct_scalar(self, node): + # type: (Any) -> Any + if not isinstance(node, ScalarNode): + raise ConstructorError( + None, + None, + _F('expected a scalar node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + return node.value + + def construct_sequence(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if not isinstance(node, SequenceNode): + raise ConstructorError( + None, + None, + _F('expected a sequence node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + return [self.construct_object(child, deep=deep) for child in node.value] + + def construct_mapping(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if not isinstance(node, MappingNode): + raise 
ConstructorError( + None, + None, + _F('expected a mapping node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + total_mapping = self.yaml_base_dict_type() + if getattr(node, 'merge', None) is not None: + todo = [(node.merge, False), (node.value, False)] + else: + todo = [(node.value, True)] + for values, check in todo: + mapping = self.yaml_base_dict_type() # type: Dict[Any, Any] + for key_node, value_node in values: + # keys can be list -> deep + key = self.construct_object(key_node, deep=True) + # lists are not hashable, but tuples are + if not isinstance(key, Hashable): + if isinstance(key, list): + key = tuple(key) + if not isinstance(key, Hashable): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unhashable key', + key_node.start_mark, + ) + + value = self.construct_object(value_node, deep=deep) + if check: + if self.check_mapping_key(node, key_node, mapping, key, value): + mapping[key] = value + else: + mapping[key] = value + total_mapping.update(mapping) + return total_mapping + + def check_mapping_key(self, node, key_node, mapping, key, value): + # type: (Any, Any, Any, Any, Any) -> bool + """return True if key is unique""" + if key in mapping: + if not self.allow_duplicate_keys: + mk = mapping.get(key) + args = [ + 'while constructing a mapping', + node.start_mark, + 'found duplicate key "{}" with value "{}" ' + '(original value: "{}")'.format(key, value, mk), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. + """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + return False + return True + + def check_set_key(self, node, key_node, setting, key): + # type: (Any, Any, Any, Any, Any) -> None + if key in setting: + if not self.allow_duplicate_keys: + args = [ + 'while constructing a set', + node.start_mark, + 'found duplicate key "{}"'.format(key), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. 
+ """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + + def construct_pairs(self, node, deep=False): + # type: (Any, bool) -> Any + if not isinstance(node, MappingNode): + raise ConstructorError( + None, + None, + _F('expected a mapping node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + @classmethod + def add_constructor(cls, tag, constructor): + # type: (Any, Any) -> None + if 'yaml_constructors' not in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + + @classmethod + def add_multi_constructor(cls, tag_prefix, multi_constructor): + # type: (Any, Any) -> None + if 'yaml_multi_constructors' not in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + + +class SafeConstructor(BaseConstructor): + def construct_scalar(self, node): + # type: (Any) -> Any + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == 'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + # type: (Any) -> Any + """ + This implements the merge key feature http://yaml.org/type/merge.html + by inserting keys from the merge dict/list of dicts if not yet + available in this node + """ + merge = [] # type: List[Any] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == 'tag:yaml.org,2002:merge': + if merge: # double << key + if self.allow_duplicate_keys: + del node.value[index] + index += 1 + continue + args = [ + 'while constructing a mapping', + node.start_mark, + 'found duplicate key "{}"'.format(key_node.value), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. 
+ """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + _F( + 'expected a mapping for merging, but found {subnode_id!s}', + subnode_id=subnode.id, + ), + subnode.start_mark, + ) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + _F( + 'expected a mapping or list of mappings for merging, ' + 'but found {value_node_id!s}', + value_node_id=value_node.id, + ), + value_node.start_mark, + ) + elif key_node.tag == 'tag:yaml.org,2002:value': + key_node.tag = 'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if bool(merge): + node.merge = merge # separate merge keys to be able to update without duplicate + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + # type: (Any) -> Any + self.construct_scalar(node) + return None + + # YAML 1.2 spec doesn't mention yes/no etc any more, 1.1 does + bool_values = { + 'yes': True, + 'no': False, + 'y': True, + 'n': False, + 'true': True, + 'false': False, + 'on': True, + 'off': False, + } + + def construct_yaml_bool(self, node): + # type: (Any) -> bool + value = self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + # type: (Any) -> int + value_s = self.construct_scalar(node) + value_s = value_s.replace('_', "") + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + value_s = value_s[1:] + if value_s == '0': + return 0 + elif value_s.startswith('0b'): + return sign * int(value_s[2:], 2) + elif value_s.startswith('0x'): + return sign * int(value_s[2:], 16) + elif value_s.startswith('0o'): + return sign * int(value_s[2:], 8) + elif self.resolver.processing_version == (1, 1) and value_s[0] == '0': + return sign * int(value_s, 8) + elif self.resolver.processing_version == (1, 1) and ':' in value_s: + digits = [int(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + else: + return sign * int(value_s) + + inf_value = 1e300 + while inf_value != inf_value * inf_value: + inf_value *= inf_value + nan_value = -inf_value / inf_value # Trying to make a quiet NaN (like C99). 
+ + def construct_yaml_float(self, node): + # type: (Any) -> float + value_so = self.construct_scalar(node) + value_s = value_so.replace('_', "").lower() + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + value_s = value_s[1:] + if value_s == '.inf': + return sign * self.inf_value + elif value_s == '.nan': + return self.nan_value + elif self.resolver.processing_version != (1, 2) and ':' in value_s: + digits = [float(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + else: + if self.resolver.processing_version != (1, 2) and 'e' in value_s: + # value_s is lower case independent of input + mantissa, exponent = value_s.split('e') + if '.' not in mantissa: + warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so)) + return sign * float(value_s) + + def construct_yaml_binary(self, node): + # type: (Any) -> Any + try: + value = self.construct_scalar(node).encode('ascii') + except UnicodeEncodeError as exc: + raise ConstructorError( + None, + None, + _F('failed to convert base64 data into ascii: {exc!s}', exc=exc), + node.start_mark, + ) + try: + return base64.decodebytes(value) + except binascii.Error as exc: + raise ConstructorError( + None, + None, + _F('failed to decode base64 data: {exc!s}', exc=exc), + node.start_mark, + ) + + timestamp_regexp = timestamp_regexp # moved to util 0.17.17 + + def construct_yaml_timestamp(self, node, values=None): + # type: (Any, Any) -> Any + if values is None: + try: + match = self.timestamp_regexp.match(node.value) + except TypeError: + match = None + if match is None: + raise ConstructorError( + None, + None, + 'failed to construct timestamp from "{}"'.format(node.value), + node.start_mark, + ) + values = match.groupdict() + return create_timestamp(**values) + + def construct_yaml_omap(self, node): + # type: (Any) -> Any + # Note: we do now check for duplicate keys + omap = ordereddict() + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + _F('expected a sequence, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + _F( + 'expected a mapping of length 1, but found {subnode_id!s}', + subnode_id=subnode.id, + ), + subnode.start_mark, + ) + if len(subnode.value) != 1: + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + _F( + 'expected a single mapping item, but found {len_subnode_val:d} items', + len_subnode_val=len(subnode.value), + ), + subnode.start_mark, + ) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + assert key not in omap + value = self.construct_object(value_node) + omap[key] = value + + def construct_yaml_pairs(self, node): + # type: (Any) -> Any + # Note: the same code as `construct_yaml_omap`. 
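+        # Like construct_yaml_omap above, this is a two-step generator: the
+        # empty list is yielded first so recursive references can be resolved,
+        # then filled in. Unlike the omap path there is no duplicate-key
+        # assert, so e.g. '!!pairs [a: 1, a: 2]' loads as [('a', 1), ('a', 2)].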
+ pairs = [] # type: List[Any] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError( + 'while constructing pairs', + node.start_mark, + _F('expected a sequence, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing pairs', + node.start_mark, + _F( + 'expected a mapping of length 1, but found {subnode_id!s}', + subnode_id=subnode.id, + ), + subnode.start_mark, + ) + if len(subnode.value) != 1: + raise ConstructorError( + 'while constructing pairs', + node.start_mark, + _F( + 'expected a single mapping item, but found {len_subnode_val:d} items', + len_subnode_val=len(subnode.value), + ), + subnode.start_mark, + ) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + # type: (Any) -> Any + data = set() # type: Set[Any] + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + # type: (Any) -> Any + value = self.construct_scalar(node) + return value + + def construct_yaml_seq(self, node): + # type: (Any) -> Any + data = self.yaml_base_list_type() # type: List[Any] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + # type: (Any) -> Any + data = self.yaml_base_dict_type() # type: Dict[Any, Any] + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + # type: (Any, Any) -> Any + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + # type: (Any) -> None + raise ConstructorError( + None, + None, + _F( + 'could not determine a constructor for the tag {node_tag!r}', node_tag=node.tag + ), + node.start_mark, + ) + + +SafeConstructor.add_constructor('tag:yaml.org,2002:null', SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor('tag:yaml.org,2002:bool', SafeConstructor.construct_yaml_bool) + +SafeConstructor.add_constructor('tag:yaml.org,2002:int', SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:float', SafeConstructor.construct_yaml_float +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:binary', SafeConstructor.construct_yaml_binary +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:timestamp', SafeConstructor.construct_yaml_timestamp +) + +SafeConstructor.add_constructor('tag:yaml.org,2002:omap', SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:pairs', SafeConstructor.construct_yaml_pairs +) + +SafeConstructor.add_constructor('tag:yaml.org,2002:set', SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor('tag:yaml.org,2002:str', SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor('tag:yaml.org,2002:seq', SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor('tag:yaml.org,2002:map', SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, SafeConstructor.construct_undefined) + + +class Constructor(SafeConstructor): + def construct_python_str(self, node): + # type: (Any) -> Any + return self.construct_scalar(node) + + def 
construct_python_unicode(self, node): + # type: (Any) -> Any + return self.construct_scalar(node) + + def construct_python_bytes(self, node): + # type: (Any) -> Any + try: + value = self.construct_scalar(node).encode('ascii') + except UnicodeEncodeError as exc: + raise ConstructorError( + None, + None, + _F('failed to convert base64 data into ascii: {exc!s}', exc=exc), + node.start_mark, + ) + try: + return base64.decodebytes(value) + except binascii.Error as exc: + raise ConstructorError( + None, + None, + _F('failed to decode base64 data: {exc!s}', exc=exc), + node.start_mark, + ) + + def construct_python_long(self, node): + # type: (Any) -> int + val = self.construct_yaml_int(node) + return val + + def construct_python_complex(self, node): + # type: (Any) -> Any + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + # type: (Any) -> Any + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + # type: (Any, Any) -> Any + if not name: + raise ConstructorError( + 'while constructing a Python module', + mark, + 'expected non-empty name appended to the tag', + mark, + ) + try: + __import__(name) + except ImportError as exc: + raise ConstructorError( + 'while constructing a Python module', + mark, + _F('cannot find module {name!r} ({exc!s})', name=name, exc=exc), + mark, + ) + return sys.modules[name] + + def find_python_name(self, name, mark): + # type: (Any, Any) -> Any + if not name: + raise ConstructorError( + 'while constructing a Python object', + mark, + 'expected non-empty name appended to the tag', + mark, + ) + if '.' in name: + lname = name.split('.') + lmodule_name = lname + lobject_name = [] # type: List[Any] + while len(lmodule_name) > 1: + lobject_name.insert(0, lmodule_name.pop()) + module_name = '.'.join(lmodule_name) + try: + __import__(module_name) + # object_name = '.'.join(object_name) + break + except ImportError: + continue + else: + module_name = builtins_module + lobject_name = [name] + try: + __import__(module_name) + except ImportError as exc: + raise ConstructorError( + 'while constructing a Python object', + mark, + _F( + 'cannot find module {module_name!r} ({exc!s})', + module_name=module_name, + exc=exc, + ), + mark, + ) + module = sys.modules[module_name] + object_name = '.'.join(lobject_name) + obj = module + while lobject_name: + if not hasattr(obj, lobject_name[0]): + + raise ConstructorError( + 'while constructing a Python object', + mark, + _F( + 'cannot find {object_name!r} in the module {module_name!r}', + object_name=object_name, + module_name=module.__name__, + ), + mark, + ) + obj = getattr(obj, lobject_name.pop(0)) + return obj + + def construct_python_name(self, suffix, node): + # type: (Any, Any) -> Any + value = self.construct_scalar(node) + if value: + raise ConstructorError( + 'while constructing a Python name', + node.start_mark, + _F('expected the empty value, but found {value!r}', value=value), + node.start_mark, + ) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + # type: (Any, Any) -> Any + value = self.construct_scalar(node) + if value: + raise ConstructorError( + 'while constructing a Python module', + node.start_mark, + _F('expected the empty value, but found {value!r}', value=value), + node.start_mark, + ) + return self.find_python_module(suffix, node.start_mark) + + def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False): + # type: (Any, Any, Any, Any, bool) -> Any + if not 
args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, instance, state): + # type: (Any, Any) -> None + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} # type: Dict[Any, Any] + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(instance, key, value) + + def construct_python_object(self, suffix, node): + # type: (Any, Any) -> Any + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + self.recursive_objects[node] = instance + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # type: (Any, Any, bool) -> Any + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. + if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} # type: Dict[Any, Any] + state = {} # type: Dict[Any, Any] + listitems = [] # type: List[Any] + dictitems = {} # type: Dict[Any, Any] + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if bool(state): + self.set_python_instance_state(instance, state) + if bool(listitems): + instance.extend(listitems) + if bool(dictitems): + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + # type: (Any, Any) -> Any + return self.construct_python_object_apply(suffix, node, newobj=True) + + +Constructor.add_constructor('tag:yaml.org,2002:python/none', Constructor.construct_yaml_null) + +Constructor.add_constructor('tag:yaml.org,2002:python/bool', Constructor.construct_yaml_bool) + +Constructor.add_constructor('tag:yaml.org,2002:python/str', Constructor.construct_python_str) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/unicode', Constructor.construct_python_unicode +) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/bytes', Constructor.construct_python_bytes +) + +Constructor.add_constructor('tag:yaml.org,2002:python/int', Constructor.construct_yaml_int) + +Constructor.add_constructor('tag:yaml.org,2002:python/long', Constructor.construct_python_long) + +Constructor.add_constructor('tag:yaml.org,2002:python/float', Constructor.construct_yaml_float) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/complex', Constructor.construct_python_complex +) + +Constructor.add_constructor('tag:yaml.org,2002:python/list', 
Constructor.construct_yaml_seq) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/tuple', Constructor.construct_python_tuple +) + +Constructor.add_constructor('tag:yaml.org,2002:python/dict', Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/name:', Constructor.construct_python_name +) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/module:', Constructor.construct_python_module +) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object:', Constructor.construct_python_object +) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object/apply:', Constructor.construct_python_object_apply +) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object/new:', Constructor.construct_python_object_new +) + + +class RoundTripConstructor(SafeConstructor): + """need to store the comments on the node itself, + as well as on the items + """ + + def comment(self, idx): + # type: (Any) -> Any + assert self.loader.comment_handling is not None + x = self.scanner.comments[idx] + x.set_assigned() + return x + + def comments(self, list_of_comments, idx=None): + # type: (Any, Optional[Any]) -> Any + # hand in the comment and optional pre, eol, post segment + if list_of_comments is None: + return [] + if idx is not None: + if list_of_comments[idx] is None: + return [] + list_of_comments = list_of_comments[idx] + for x in list_of_comments: + yield self.comment(x) + + def construct_scalar(self, node): + # type: (Any) -> Any + if not isinstance(node, ScalarNode): + raise ConstructorError( + None, + None, + _F('expected a scalar node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + + if node.style == '|' and isinstance(node.value, str): + lss = LiteralScalarString(node.value, anchor=node.anchor) + if self.loader and self.loader.comment_handling is None: + if node.comment and node.comment[1]: + lss.comment = node.comment[1][0] # type: ignore + else: + # NEWCMNT + if node.comment is not None and node.comment[1]: + # nprintf('>>>>nc1', node.comment) + # EOL comment after | + lss.comment = self.comment(node.comment[1][0]) # type: ignore + return lss + if node.style == '>' and isinstance(node.value, str): + fold_positions = [] # type: List[int] + idx = -1 + while True: + idx = node.value.find('\a', idx + 1) + if idx < 0: + break + fold_positions.append(idx - len(fold_positions)) + fss = FoldedScalarString(node.value.replace('\a', ''), anchor=node.anchor) + if self.loader and self.loader.comment_handling is None: + if node.comment and node.comment[1]: + fss.comment = node.comment[1][0] # type: ignore + else: + # NEWCMNT + if node.comment is not None and node.comment[1]: + # nprintf('>>>>nc2', node.comment) + # EOL comment after > + fss.comment = self.comment(node.comment[1][0]) # type: ignore + if fold_positions: + fss.fold_pos = fold_positions # type: ignore + return fss + elif bool(self._preserve_quotes) and isinstance(node.value, str): + if node.style == "'": + return SingleQuotedScalarString(node.value, anchor=node.anchor) + if node.style == '"': + return DoubleQuotedScalarString(node.value, anchor=node.anchor) + if node.anchor: + return PlainScalarString(node.value, anchor=node.anchor) + return node.value + + def construct_yaml_int(self, node): + # type: (Any) -> Any + width = None # type: Any + value_su = self.construct_scalar(node) + try: + sx = value_su.rstrip('_') + underscore = [len(sx) - sx.rindex('_') - 1, False, False] # type: Any + except ValueError: + underscore = None + 
except IndexError: + underscore = None + value_s = value_su.replace('_', "") + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + value_s = value_s[1:] + if value_s == '0': + return 0 + elif value_s.startswith('0b'): + if self.resolver.processing_version > (1, 1) and value_s[2] == '0': + width = len(value_s[2:]) + if underscore is not None: + underscore[1] = value_su[2] == '_' + underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_' + return BinaryInt( + sign * int(value_s[2:], 2), + width=width, + underscore=underscore, + anchor=node.anchor, + ) + elif value_s.startswith('0x'): + # default to lower-case if no a-fA-F in string + if self.resolver.processing_version > (1, 1) and value_s[2] == '0': + width = len(value_s[2:]) + hex_fun = HexInt # type: Any + for ch in value_s[2:]: + if ch in 'ABCDEF': # first non-digit is capital + hex_fun = HexCapsInt + break + if ch in 'abcdef': + break + if underscore is not None: + underscore[1] = value_su[2] == '_' + underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_' + return hex_fun( + sign * int(value_s[2:], 16), + width=width, + underscore=underscore, + anchor=node.anchor, + ) + elif value_s.startswith('0o'): + if self.resolver.processing_version > (1, 1) and value_s[2] == '0': + width = len(value_s[2:]) + if underscore is not None: + underscore[1] = value_su[2] == '_' + underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_' + return OctalInt( + sign * int(value_s[2:], 8), + width=width, + underscore=underscore, + anchor=node.anchor, + ) + elif self.resolver.processing_version != (1, 2) and value_s[0] == '0': + return sign * int(value_s, 8) + elif self.resolver.processing_version != (1, 2) and ':' in value_s: + digits = [int(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + elif self.resolver.processing_version > (1, 1) and value_s[0] == '0': + # not an octal, an integer with leading zero(s) + if underscore is not None: + # cannot have a leading underscore + underscore[2] = len(value_su) > 1 and value_su[-1] == '_' + return ScalarInt(sign * int(value_s), width=len(value_s), underscore=underscore) + elif underscore: + # cannot have a leading underscore + underscore[2] = len(value_su) > 1 and value_su[-1] == '_' + return ScalarInt( + sign * int(value_s), width=None, underscore=underscore, anchor=node.anchor + ) + elif node.anchor: + return ScalarInt(sign * int(value_s), width=None, anchor=node.anchor) + else: + return sign * int(value_s) + + def construct_yaml_float(self, node): + # type: (Any) -> Any + def leading_zeros(v): + # type: (Any) -> int + lead0 = 0 + idx = 0 + while idx < len(v) and v[idx] in '0.': + if v[idx] == '0': + lead0 += 1 + idx += 1 + return lead0 + + # underscore = None + m_sign = False # type: Any + value_so = self.construct_scalar(node) + value_s = value_so.replace('_', "").lower() + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + m_sign = value_s[0] + value_s = value_s[1:] + if value_s == '.inf': + return sign * self.inf_value + if value_s == '.nan': + return self.nan_value + if self.resolver.processing_version != (1, 2) and ':' in value_s: + digits = [float(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + if 'e' in value_s: + try: + mantissa, exponent = value_so.split('e') + exp = 'e' + except ValueError: + mantissa, exponent = 
value_so.split('E') + exp = 'E' + if self.resolver.processing_version != (1, 2): + # value_s is lower case independent of input + if '.' not in mantissa: + warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so)) + lead0 = leading_zeros(mantissa) + width = len(mantissa) + prec = mantissa.find('.') + if m_sign: + width -= 1 + e_width = len(exponent) + e_sign = exponent[0] in '+-' + # nprint('sf', width, prec, m_sign, exp, e_width, e_sign) + return ScalarFloat( + sign * float(value_s), + width=width, + prec=prec, + m_sign=m_sign, + m_lead0=lead0, + exp=exp, + e_width=e_width, + e_sign=e_sign, + anchor=node.anchor, + ) + width = len(value_so) + prec = value_so.index('.') # you can use index, this would not be float without dot + lead0 = leading_zeros(value_so) + return ScalarFloat( + sign * float(value_s), + width=width, + prec=prec, + m_sign=m_sign, + m_lead0=lead0, + anchor=node.anchor, + ) + + def construct_yaml_str(self, node): + # type: (Any) -> Any + value = self.construct_scalar(node) + if isinstance(value, ScalarString): + return value + return value + + def construct_rt_sequence(self, node, seqtyp, deep=False): + # type: (Any, Any, bool) -> Any + if not isinstance(node, SequenceNode): + raise ConstructorError( + None, + None, + _F('expected a sequence node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + ret_val = [] + if self.loader and self.loader.comment_handling is None: + if node.comment: + seqtyp._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + # this happens e.g. if you have a sequence element that is a flow-style + # mapping and that has no EOL comment but a following commentline or + # empty line + seqtyp.yaml_end_comment_extend(node.comment[2], clear=True) + else: + # NEWCMNT + if node.comment: + nprintf('nc3', node.comment) + if node.anchor: + from pipenv.vendor.ruamel.yaml.serializer import templated_id + + if not templated_id(node.anchor): + seqtyp.yaml_set_anchor(node.anchor) + for idx, child in enumerate(node.value): + if child.comment: + seqtyp._yaml_add_comment(child.comment, key=idx) + child.comment = None # if moved to sequence remove from child + ret_val.append(self.construct_object(child, deep=deep)) + seqtyp._yaml_set_idx_line_col( + idx, [child.start_mark.line, child.start_mark.column] + ) + return ret_val + + def flatten_mapping(self, node): + # type: (Any) -> Any + """ + This implements the merge key feature http://yaml.org/type/merge.html + by inserting keys from the merge dict/list of dicts if not yet + available in this node + """ + + def constructed(value_node): + # type: (Any) -> Any + # If the contents of a merge are defined within the + # merge marker, then they won't have been constructed + # yet. But if they were already constructed, we need to use + # the existing object. 
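+            # e.g. with '<<: *defaults' the anchored &defaults mapping has
+            # typically been constructed already, whereas an inline
+            # '<<: {port: 80}' mapping (illustrative) is constructed here on
+            # first use.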
+ if value_node in self.constructed_objects: + value = self.constructed_objects[value_node] + else: + value = self.construct_object(value_node, deep=False) + return value + + # merge = [] + merge_map_list = [] # type: List[Any] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == 'tag:yaml.org,2002:merge': + if merge_map_list: # double << key + if self.allow_duplicate_keys: + del node.value[index] + index += 1 + continue + args = [ + 'while constructing a mapping', + node.start_mark, + 'found duplicate key "{}"'.format(key_node.value), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. + """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + del node.value[index] + if isinstance(value_node, MappingNode): + merge_map_list.append((index, constructed(value_node))) + # self.flatten_mapping(value_node) + # merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + # submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + _F( + 'expected a mapping for merging, but found {subnode_id!s}', + subnode_id=subnode.id, + ), + subnode.start_mark, + ) + merge_map_list.append((index, constructed(subnode))) + # self.flatten_mapping(subnode) + # submerge.append(subnode.value) + # submerge.reverse() + # for value in submerge: + # merge.extend(value) + else: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + _F( + 'expected a mapping or list of mappings for merging, ' + 'but found {value_node_id!s}', + value_node_id=value_node.id, + ), + value_node.start_mark, + ) + elif key_node.tag == 'tag:yaml.org,2002:value': + key_node.tag = 'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + return merge_map_list + # if merge: + # node.value = merge + node.value + + def _sentinel(self): + # type: () -> None + pass + + def construct_mapping(self, node, maptyp, deep=False): # type: ignore + # type: (Any, Any, bool) -> Any + if not isinstance(node, MappingNode): + raise ConstructorError( + None, + None, + _F('expected a mapping node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + merge_map = self.flatten_mapping(node) + # mapping = {} + if self.loader and self.loader.comment_handling is None: + if node.comment: + maptyp._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + maptyp.yaml_end_comment_extend(node.comment[2], clear=True) + else: + # NEWCMNT + if node.comment: + # nprintf('nc4', node.comment, node.start_mark) + if maptyp.ca.pre is None: + maptyp.ca.pre = [] + for cmnt in self.comments(node.comment, 0): + maptyp.ca.pre.append(cmnt) + if node.anchor: + from pipenv.vendor.ruamel.yaml.serializer import templated_id + + if not templated_id(node.anchor): + maptyp.yaml_set_anchor(node.anchor) + last_key, last_value = None, self._sentinel + for key_node, value_node in node.value: + # keys can be list -> deep + key = self.construct_object(key_node, deep=True) + # lists are not hashable, but tuples are + if not isinstance(key, Hashable): + if isinstance(key, MutableSequence): + key_s = CommentedKeySeq(key) + if key_node.flow_style is True: + key_s.fa.set_flow_style() + elif 
key_node.flow_style is False: + key_s.fa.set_block_style() + key = key_s + elif isinstance(key, MutableMapping): + key_m = CommentedKeyMap(key) + if key_node.flow_style is True: + key_m.fa.set_flow_style() + elif key_node.flow_style is False: + key_m.fa.set_block_style() + key = key_m + if not isinstance(key, Hashable): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unhashable key', + key_node.start_mark, + ) + value = self.construct_object(value_node, deep=deep) + if self.check_mapping_key(node, key_node, maptyp, key, value): + if self.loader and self.loader.comment_handling is None: + if key_node.comment and len(key_node.comment) > 4 and key_node.comment[4]: + if last_value is None: + key_node.comment[0] = key_node.comment.pop(4) + maptyp._yaml_add_comment(key_node.comment, value=last_key) + else: + key_node.comment[2] = key_node.comment.pop(4) + maptyp._yaml_add_comment(key_node.comment, key=key) + key_node.comment = None + if key_node.comment: + maptyp._yaml_add_comment(key_node.comment, key=key) + if value_node.comment: + maptyp._yaml_add_comment(value_node.comment, value=key) + else: + # NEWCMNT + if key_node.comment: + nprintf('nc5a', key, key_node.comment) + if key_node.comment[0]: + maptyp.ca.set(key, C_KEY_PRE, key_node.comment[0]) + if key_node.comment[1]: + maptyp.ca.set(key, C_KEY_EOL, key_node.comment[1]) + if key_node.comment[2]: + maptyp.ca.set(key, C_KEY_POST, key_node.comment[2]) + if value_node.comment: + nprintf('nc5b', key, value_node.comment) + if value_node.comment[0]: + maptyp.ca.set(key, C_VALUE_PRE, value_node.comment[0]) + if value_node.comment[1]: + maptyp.ca.set(key, C_VALUE_EOL, value_node.comment[1]) + if value_node.comment[2]: + maptyp.ca.set(key, C_VALUE_POST, value_node.comment[2]) + maptyp._yaml_set_kv_line_col( + key, + [ + key_node.start_mark.line, + key_node.start_mark.column, + value_node.start_mark.line, + value_node.start_mark.column, + ], + ) + maptyp[key] = value + last_key, last_value = key, value # could use indexing + # do this last, or <<: before a key will prevent insertion in instances + # of collections.OrderedDict (as they have no __contains__ + if merge_map: + maptyp.add_yaml_merge(merge_map) + + def construct_setting(self, node, typ, deep=False): + # type: (Any, Any, bool) -> Any + if not isinstance(node, MappingNode): + raise ConstructorError( + None, + None, + _F('expected a mapping node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + if self.loader and self.loader.comment_handling is None: + if node.comment: + typ._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + typ.yaml_end_comment_extend(node.comment[2], clear=True) + else: + # NEWCMNT + if node.comment: + nprintf('nc6', node.comment) + if node.anchor: + from pipenv.vendor.ruamel.yaml.serializer import templated_id + + if not templated_id(node.anchor): + typ.yaml_set_anchor(node.anchor) + for key_node, value_node in node.value: + # keys can be list -> deep + key = self.construct_object(key_node, deep=True) + # lists are not hashable, but tuples are + if not isinstance(key, Hashable): + if isinstance(key, list): + key = tuple(key) + if not isinstance(key, Hashable): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unhashable key', + key_node.start_mark, + ) + # construct but should be null + value = self.construct_object(value_node, deep=deep) # NOQA + self.check_set_key(node, key_node, typ, key) + if self.loader and self.loader.comment_handling is None: + if 
key_node.comment: + typ._yaml_add_comment(key_node.comment, key=key) + if value_node.comment: + typ._yaml_add_comment(value_node.comment, value=key) + else: + # NEWCMNT + if key_node.comment: + nprintf('nc7a', key_node.comment) + if value_node.comment: + nprintf('nc7b', value_node.comment) + typ.add(key) + + def construct_yaml_seq(self, node): + # type: (Any) -> Any + data = CommentedSeq() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + # if node.comment: + # data._yaml_add_comment(node.comment) + yield data + data.extend(self.construct_rt_sequence(node, data)) + self.set_collection_style(data, node) + + def construct_yaml_map(self, node): + # type: (Any) -> Any + data = CommentedMap() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + yield data + self.construct_mapping(node, data, deep=True) + self.set_collection_style(data, node) + + def set_collection_style(self, data, node): + # type: (Any, Any) -> None + if len(data) == 0: + return + if node.flow_style is True: + data.fa.set_flow_style() + elif node.flow_style is False: + data.fa.set_block_style() + + def construct_yaml_object(self, node, cls): + # type: (Any, Any) -> Any + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = SafeConstructor.construct_mapping(self, node, deep=True) + data.__setstate__(state) + else: + state = SafeConstructor.construct_mapping(self, node) + if hasattr(data, '__attrs_attrs__'): # issue 394 + data.__init__(**state) + else: + data.__dict__.update(state) + if node.anchor: + from pipenv.vendor.ruamel.yaml.serializer import templated_id + from pipenv.vendor.ruamel.yaml.anchor import Anchor + + if not templated_id(node.anchor): + if not hasattr(data, Anchor.attrib): + a = Anchor() + setattr(data, Anchor.attrib, a) + else: + a = getattr(data, Anchor.attrib) + a.value = node.anchor + + def construct_yaml_omap(self, node): + # type: (Any) -> Any + # Note: we do now check for duplicate keys + omap = CommentedOrderedMap() + omap._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.flow_style is True: + omap.fa.set_flow_style() + elif node.flow_style is False: + omap.fa.set_block_style() + yield omap + if self.loader and self.loader.comment_handling is None: + if node.comment: + omap._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + omap.yaml_end_comment_extend(node.comment[2], clear=True) + else: + # NEWCMNT + if node.comment: + nprintf('nc8', node.comment) + if not isinstance(node, SequenceNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + _F('expected a sequence, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + _F( + 'expected a mapping of length 1, but found {subnode_id!s}', + subnode_id=subnode.id, + ), + subnode.start_mark, + ) + if len(subnode.value) != 1: + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + _F( + 'expected a single mapping item, but found {len_subnode_val:d} items', + len_subnode_val=len(subnode.value), + ), + subnode.start_mark, + ) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + assert key not in omap + value = self.construct_object(value_node) + if self.loader and self.loader.comment_handling is None: + if key_node.comment: + omap._yaml_add_comment(key_node.comment, key=key) + if 
subnode.comment: + omap._yaml_add_comment(subnode.comment, key=key) + if value_node.comment: + omap._yaml_add_comment(value_node.comment, value=key) + else: + # NEWCMNT + if key_node.comment: + nprintf('nc9a', key_node.comment) + if subnode.comment: + nprintf('nc9b', subnode.comment) + if value_node.comment: + nprintf('nc9c', value_node.comment) + omap[key] = value + + def construct_yaml_set(self, node): + # type: (Any) -> Any + data = CommentedSet() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + yield data + self.construct_setting(node, data) + + def construct_undefined(self, node): + # type: (Any) -> Any + try: + if isinstance(node, MappingNode): + data = CommentedMap() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.flow_style is True: + data.fa.set_flow_style() + elif node.flow_style is False: + data.fa.set_block_style() + data.yaml_set_tag(node.tag) + yield data + if node.anchor: + from pipenv.vendor.ruamel.yaml.serializer import templated_id + + if not templated_id(node.anchor): + data.yaml_set_anchor(node.anchor) + self.construct_mapping(node, data) + return + elif isinstance(node, ScalarNode): + data2 = TaggedScalar() + data2.value = self.construct_scalar(node) + data2.style = node.style + data2.yaml_set_tag(node.tag) + yield data2 + if node.anchor: + from pipenv.vendor.ruamel.yaml.serializer import templated_id + + if not templated_id(node.anchor): + data2.yaml_set_anchor(node.anchor, always_dump=True) + return + elif isinstance(node, SequenceNode): + data3 = CommentedSeq() + data3._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.flow_style is True: + data3.fa.set_flow_style() + elif node.flow_style is False: + data3.fa.set_block_style() + data3.yaml_set_tag(node.tag) + yield data3 + if node.anchor: + from pipenv.vendor.ruamel.yaml.serializer import templated_id + + if not templated_id(node.anchor): + data3.yaml_set_anchor(node.anchor) + data3.extend(self.construct_sequence(node)) + return + except: # NOQA + pass + raise ConstructorError( + None, + None, + _F( + 'could not determine a constructor for the tag {node_tag!r}', node_tag=node.tag + ), + node.start_mark, + ) + + def construct_yaml_timestamp(self, node, values=None): + # type: (Any, Any) -> Any + try: + match = self.timestamp_regexp.match(node.value) + except TypeError: + match = None + if match is None: + raise ConstructorError( + None, + None, + 'failed to construct timestamp from "{}"'.format(node.value), + node.start_mark, + ) + values = match.groupdict() + if not values['hour']: + return create_timestamp(**values) + # return SafeConstructor.construct_yaml_timestamp(self, node, values) + for part in ['t', 'tz_sign', 'tz_hour', 'tz_minute']: + if values[part]: + break + else: + return create_timestamp(**values) + # return SafeConstructor.construct_yaml_timestamp(self, node, values) + dd = create_timestamp(**values) # this has delta applied + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + minutes = values['tz_minute'] + tz_minute = int(minutes) if minutes else 0 + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + # should check for None and solve issue 366 should be tzinfo=delta) + data = TimeStamp( + dd.year, dd.month, dd.day, dd.hour, dd.minute, dd.second, dd.microsecond + ) + if delta: + data._yaml['delta'] = delta + tz = values['tz_sign'] + values['tz_hour'] + if values['tz_minute']: + tz += ':' + values['tz_minute'] + data._yaml['tz'] = tz + 
else: + if values['tz']: # no delta + data._yaml['tz'] = values['tz'] + + if values['t']: + data._yaml['t'] = True + return data + + def construct_yaml_bool(self, node): + # type: (Any) -> Any + b = SafeConstructor.construct_yaml_bool(self, node) + if node.anchor: + return ScalarBoolean(b, anchor=node.anchor) + return b + + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:null', RoundTripConstructor.construct_yaml_null +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:bool', RoundTripConstructor.construct_yaml_bool +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:int', RoundTripConstructor.construct_yaml_int +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:float', RoundTripConstructor.construct_yaml_float +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:binary', RoundTripConstructor.construct_yaml_binary +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:timestamp', RoundTripConstructor.construct_yaml_timestamp +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:omap', RoundTripConstructor.construct_yaml_omap +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:pairs', RoundTripConstructor.construct_yaml_pairs +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:set', RoundTripConstructor.construct_yaml_set +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:str', RoundTripConstructor.construct_yaml_str +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:seq', RoundTripConstructor.construct_yaml_seq +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:map', RoundTripConstructor.construct_yaml_map +) + +RoundTripConstructor.add_constructor(None, RoundTripConstructor.construct_undefined) diff --git a/pipenv/vendor/ruamel/yaml/cyaml.py b/pipenv/vendor/ruamel/yaml/cyaml.py new file mode 100644 index 0000000000..3cd4bd5b0c --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/cyaml.py @@ -0,0 +1,183 @@ +# coding: utf-8 + +from _ruamel_yaml import CParser, CEmitter # type: ignore + +from pipenv.vendor.ruamel.yaml.constructor import Constructor, BaseConstructor, SafeConstructor +from pipenv.vendor.ruamel.yaml.representer import Representer, SafeRepresenter, BaseRepresenter +from pipenv.vendor.ruamel.yaml.resolver import Resolver, BaseResolver + +if False: # MYPY + from typing import Any, Union, Optional # NOQA + from pipenv.vendor.ruamel.yaml.compat import StreamTextType, StreamType, VersionType # NOQA + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', 'CBaseDumper', 'CSafeDumper', 'CDumper'] + + +# this includes some hacks to solve the usage of resolver by lower level +# parts of the parser + + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): # type: ignore + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + CParser.__init__(self, stream) + self._parser = self._composer = self + BaseConstructor.__init__(self, loader=self) + BaseResolver.__init__(self, loadumper=self) + # self.descend_resolver = self._resolver.descend_resolver + # self.ascend_resolver = self._resolver.ascend_resolver + # self.resolve = self._resolver.resolve + + +class CSafeLoader(CParser, SafeConstructor, Resolver): # type: ignore + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + CParser.__init__(self, stream) + self._parser = self._composer = self + SafeConstructor.__init__(self, 
loader=self) + Resolver.__init__(self, loadumper=self) + # self.descend_resolver = self._resolver.descend_resolver + # self.ascend_resolver = self._resolver.ascend_resolver + # self.resolve = self._resolver.resolve + + +class CLoader(CParser, Constructor, Resolver): # type: ignore + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + CParser.__init__(self, stream) + self._parser = self._composer = self + Constructor.__init__(self, loader=self) + Resolver.__init__(self, loadumper=self) + # self.descend_resolver = self._resolver.descend_resolver + # self.ascend_resolver = self._resolver.ascend_resolver + # self.resolve = self._resolver.resolve + + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): # type: ignore + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + CEmitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + self._emitter = self._serializer = self._representer = self + BaseRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + BaseResolver.__init__(self, loadumper=self) + + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): # type: ignore + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + self._emitter = self._serializer = self._representer = self + CEmitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + self._emitter = self._serializer = self._representer = self + SafeRepresenter.__init__( + self, default_style=default_style, default_flow_style=default_flow_style + ) + Resolver.__init__(self) + + +class CDumper(CEmitter, Representer, Resolver): # type: ignore + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> 
None # NOQA + CEmitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + self._emitter = self._serializer = self._representer = self + Representer.__init__( + self, default_style=default_style, default_flow_style=default_flow_style + ) + Resolver.__init__(self) diff --git a/pipenv/vendor/ruamel/yaml/dumper.py b/pipenv/vendor/ruamel/yaml/dumper.py new file mode 100644 index 0000000000..7c6c57b255 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/dumper.py @@ -0,0 +1,219 @@ +# coding: utf-8 + +from pipenv.vendor.ruamel.yaml.emitter import Emitter +from pipenv.vendor.ruamel.yaml.serializer import Serializer +from pipenv.vendor.ruamel.yaml.representer import ( + Representer, + SafeRepresenter, + BaseRepresenter, + RoundTripRepresenter, +) +from pipenv.vendor.ruamel.yaml.resolver import Resolver, BaseResolver, VersionedResolver + +if False: # MYPY + from typing import Any, Dict, List, Union, Optional # NOQA + from pipenv.vendor.ruamel.yaml.compat import StreamType, VersionType # NOQA + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper'] + + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (Any, StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + BaseRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + BaseResolver.__init__(self, loadumper=self) + + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + SafeRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + Resolver.__init__(self, loadumper=self) + + +class 
Dumper(Emitter, Serializer, Representer, Resolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + Representer.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + Resolver.__init__(self, loadumper=self) + + +class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Optional[bool], Optional[int], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + RoundTripRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + VersionedResolver.__init__(self, loader=self) diff --git a/pipenv/vendor/ruamel/yaml/emitter.py b/pipenv/vendor/ruamel/yaml/emitter.py new file mode 100644 index 0000000000..bf4e8d1949 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/emitter.py @@ -0,0 +1,1772 @@ +# coding: utf-8 + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +import sys +from pipenv.vendor.ruamel.yaml.error import YAMLError, YAMLStreamError +from pipenv.vendor.ruamel.yaml.events import * # NOQA + +# fmt: off +from pipenv.vendor.ruamel.yaml.compat import _F, nprint, dbg, DBG_EVENT, \ + check_anchorname_char, nprintf # NOQA +# fmt: on + +if False: # MYPY + from typing import Any, Dict, List, Union, Text, Tuple, Optional # NOQA + from pipenv.vendor.ruamel.yaml.compat import StreamType # NOQA + +__all__ = ['Emitter', 'EmitterError'] + + +class EmitterError(YAMLError): + pass + + +class ScalarAnalysis: + def __init__( + self, + scalar, + empty, + multiline, + allow_flow_plain, + allow_block_plain, + allow_single_quoted, 
+ allow_double_quoted, + allow_block, + ): + # type: (Any, Any, Any, bool, bool, bool, bool, bool) -> None + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + + +class Indents: + # replacement for the list based stack of None/int + def __init__(self): + # type: () -> None + self.values = [] # type: List[Tuple[Any, bool]] + + def append(self, val, seq): + # type: (Any, Any) -> None + self.values.append((val, seq)) + + def pop(self): + # type: () -> Any + return self.values.pop()[0] + + def last_seq(self): + # type: () -> bool + # return the seq(uence) value for the element added before the last one + # in increase_indent() + try: + return self.values[-2][1] + except IndexError: + return False + + def seq_flow_align(self, seq_indent, column, pre_comment=False): + # type: (int, int, Optional[bool]) -> int + # extra spaces because of dash + # nprint('seq_flow_align', self.values, pre_comment) + if len(self.values) < 2 or not self.values[-1][1]: + if len(self.values) == 0 or not pre_comment: + return 0 + base = self.values[-1][0] if self.values[-1][0] is not None else 0 + if pre_comment: + return base + seq_indent # type: ignore + # return (len(self.values)) * seq_indent + # -1 for the dash + return base + seq_indent - column - 1 # type: ignore + + def __len__(self): + # type: () -> int + return len(self.values) + + +class Emitter: + # fmt: off + DEFAULT_TAG_PREFIXES = { + '!': '!', + 'tag:yaml.org,2002:': '!!', + } + # fmt: on + + MAX_SIMPLE_KEY_LENGTH = 128 + + def __init__( + self, + stream, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + brace_single_entry_mapping_in_flow_sequence=None, + dumper=None, + ): + # type: (StreamType, Any, Optional[int], Optional[int], Optional[bool], Any, Optional[int], Optional[bool], Any, Optional[bool], Any) -> None # NOQA + self.dumper = dumper + if self.dumper is not None and getattr(self.dumper, '_emitter', None) is None: + self.dumper._emitter = self + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None # type: Optional[Text] + self.allow_space_break = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] # type: List[Any] + self.state = self.expect_stream_start # type: Any + + # Current event and the event queue. + self.events = [] # type: List[Any] + self.event = None # type: Any + + # The current indentation level and the stack of previous indents. + self.indents = Indents() + self.indent = None # type: Optional[int] + + # flow_context is an expanding/shrinking list consisting of '{' and '[' + # for each unclosed flow context. If empty list that means block context + self.flow_context = [] # type: List[Text] + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? 
+ self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + self.compact_seq_seq = True # dash after dash + self.compact_seq_map = True # key after dash + # self.compact_ms = False # dash after key, only when excplicit key with ? + self.no_newline = None # type: Optional[bool] # set if directly after `- ` + + # Whether the document requires an explicit document end indicator + self.open_ended = False + + # colon handling + self.colon = ':' + self.prefixed_colon = self.colon if prefix_colon is None else prefix_colon + self.colon + # single entry mappings in flow sequence + self.brace_single_entry_mapping_in_flow_sequence = ( + brace_single_entry_mapping_in_flow_sequence # NOQA + ) + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + # set to False to get "\Uxxxxxxxx" for non-basic unicode like emojis + self.unicode_supplementary = sys.maxunicode > 0xFFFF + self.sequence_dash_offset = block_seq_indent if block_seq_indent else 0 + self.top_level_colon_align = top_level_colon_align + self.best_sequence_indent = 2 + self.requested_indent = indent # specific for literal zero indent + if indent and 1 < indent < 10: + self.best_sequence_indent = indent + self.best_map_indent = self.best_sequence_indent + # if self.best_sequence_indent < self.sequence_dash_offset + 1: + # self.best_sequence_indent = self.sequence_dash_offset + 1 + self.best_width = 80 + if width and width > self.best_sequence_indent * 2: + self.best_width = width + self.best_line_break = '\n' # type: Any + if line_break in ['\r', '\n', '\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None # type: Any + + # Prepared anchor and tag. + self.prepared_anchor = None # type: Any + self.prepared_tag = None # type: Any + + # Scalar analysis and style. + self.analysis = None # type: Any + self.style = None # type: Any + + self.scalar_after_indicator = True # write a scalar on the same line as `---` + + self.alt_null = 'null' + + @property + def stream(self): + # type: () -> Any + try: + return self._stream + except AttributeError: + raise YAMLStreamError('output stream needs to specified') + + @stream.setter + def stream(self, val): + # type: (Any) -> None + if val is None: + return + if not hasattr(val, 'write'): + raise YAMLStreamError('stream argument needs to have a write() method') + self._stream = val + + @property + def serializer(self): + # type: () -> Any + try: + if hasattr(self.dumper, 'typ'): + return self.dumper.serializer + return self.dumper._serializer + except AttributeError: + return self # cyaml + + @property + def flow_level(self): + # type: () -> int + return len(self.flow_context) + + def dispose(self): + # type: () -> None + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + # type: (Any) -> None + if dbg(DBG_EVENT): + nprint(event) + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. 
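Reviewer note: the comment ending this hunk is the crux of the emitter design. `emit()` only queues events; the state machine advances once `need_more_events()` is satisfied. A minimal, hedged illustration of why the lookahead matters, via the vendored high-level API (the `pipenv.vendor.` import prefix matches this patch; outside pipenv the plain `ruamel.yaml` path behaves the same): an empty container can only be written as flow-style `[]`/`{}` once its matching end event is already in the queue.

```python
# Sketch: why the emitter buffers events. Empty containers are detected
# by peeking at the queued end event (see check_empty_sequence/_mapping
# later in this file), so they come out as flow-style [] / {}.
import sys
from pipenv.vendor.ruamel.yaml import YAML

yaml = YAML()
yaml.dump({'empty_list': [], 'empty_map': {}}, sys.stdout)
# expected output:
#   empty_list: []
#   empty_map: {}
```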
+ + def need_more_events(self): + # type: () -> bool + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + # type: (int) -> bool + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return len(self.events) < count + 1 + + def increase_indent(self, flow=False, sequence=None, indentless=False): + # type: (bool, Optional[bool], bool) -> None + self.indents.append(self.indent, sequence) + if self.indent is None: # top level + if flow: + # self.indent = self.best_sequence_indent if self.indents.last_seq() else \ + # self.best_map_indent + # self.indent = self.best_sequence_indent + self.indent = self.requested_indent + else: + self.indent = 0 + elif not indentless: + self.indent += ( + self.best_sequence_indent if self.indents.last_seq() else self.best_map_indent + ) + # if self.indents.last_seq(): + # if self.indent == 0: # top level block sequence + # self.indent = self.best_sequence_indent - self.sequence_dash_offset + # else: + # self.indent += self.best_sequence_indent + # else: + # self.indent += self.best_map_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + # type: () -> None + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not hasattr(self.stream, 'encoding'): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError( + _F('expected StreamStartEvent, but got {self_event!s}', self_event=self.event) + ) + + def expect_nothing(self): + # type: () -> None + raise EmitterError( + _F('expected nothing, but got {self_event!s}', self_event=self.event) + ) + + # Document handlers. 
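Reviewer note: the document handlers that follow decide whether the `---`/`...` markers are written. A hedged sketch of toggling that from the public API:

```python
# Sketch: explicit_start forces expect_document_start() to write '---'
# even for a single, otherwise-implicit document.
import sys
from pipenv.vendor.ruamel.yaml import YAML

yaml = YAML()
yaml.explicit_start = True
yaml.dump({'a': 1}, sys.stdout)
# expected output:
#   ---
#   a: 1
```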
+ + def expect_first_document_start(self): + # type: () -> Any + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + # type: (bool) -> None + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator('...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = sorted(self.event.tags.keys()) + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = ( + first + and not self.event.explicit + and not self.canonical + and not self.event.version + and not self.event.tags + and not self.check_empty_document() + ) + if not implicit: + self.write_indent() + self.write_indicator('---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator('...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError( + _F( + 'expected DocumentStartEvent, but got {self_event!s}', + self_event=self.event, + ) + ) + + def expect_document_end(self): + # type: () -> None + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator('...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError( + _F('expected DocumentEndEvent, but got {self_event!s}', self_event=self.event) + ) + + def expect_document_root(self): + # type: () -> None + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
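Reviewer note: for orientation before the node handlers, a hedged sketch that drives `Emitter` directly with the event grammar from the file's header comment. Real callers go through `YAML().dump()`, which stacks the representer and serializer on top; the three-element `implicit` tuple below mimics what the vendored serializer produces.

```python
# Sketch only: feeding the emitter by hand. Assumes the vendored event
# signatures (anchor, tag, implicit, value) and a 3-tuple implicit flag.
import sys
from pipenv.vendor.ruamel.yaml.emitter import Emitter
from pipenv.vendor.ruamel.yaml.events import (
    StreamStartEvent, StreamEndEvent,
    DocumentStartEvent, DocumentEndEvent, ScalarEvent,
)

emitter = Emitter(sys.stdout)
for event in (
    StreamStartEvent(),
    DocumentStartEvent(explicit=True),
    ScalarEvent(None, None, (True, True, True), 'hello'),
    DocumentEndEvent(explicit=True),
    StreamEndEvent(),
):
    emitter.emit(event)  # queued until need_more_events() allows a step
# expected output (roughly): '--- hello' followed by a closing '...'
```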
+ + def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False): + # type: (bool, bool, bool, bool) -> None + self.root_context = root + self.sequence_context = sequence # not used in PyYAML + force_flow_indent = False + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + if ( + self.process_anchor('&') + and isinstance(self.event, ScalarEvent) + and self.sequence_context + ): + self.sequence_context = False + if ( + root + and isinstance(self.event, ScalarEvent) + and not self.scalar_after_indicator + ): + self.write_indent() + self.process_tag() + if isinstance(self.event, ScalarEvent): + # nprint('@', self.indention, self.no_newline, self.column) + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + # nprint('@', self.indention, self.no_newline, self.column) + i2, n2 = self.indention, self.no_newline # NOQA + if self.event.comment: + if self.event.flow_style is False: + if self.write_post_comment(self.event): + self.indention = False + self.no_newline = True + if self.event.flow_style: + column = self.column + if self.write_pre_comment(self.event): + if self.event.flow_style: + # force_flow_indent = True + force_flow_indent = not self.indents.values[-1][1] + self.indention = i2 + self.no_newline = not self.indention + if self.event.flow_style: + self.column = column + if ( + self.flow_level + or self.canonical + or self.event.flow_style + or self.check_empty_sequence() + ): + self.expect_flow_sequence(force_flow_indent) + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.event.flow_style is False and self.event.comment: + self.write_post_comment(self.event) + if self.event.comment and self.event.comment[1]: + self.write_pre_comment(self.event) + if self.event.flow_style: + force_flow_indent = not self.indents.values[-1][1] + if ( + self.flow_level + or self.canonical + or self.event.flow_style + or self.check_empty_mapping() + ): + self.expect_flow_mapping(single=self.event.nr_items == 1, + force_flow_indent=force_flow_indent) + else: + self.expect_block_mapping() + else: + raise EmitterError( + _F('expected NodeEvent, but got {self_event!s}', self_event=self.event) + ) + + def expect_alias(self): + # type: () -> None + if self.event.anchor is None: + raise EmitterError('anchor is not specified for alias') + self.process_anchor('*') + self.state = self.states.pop() + + def expect_scalar(self): + # type: () -> None + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
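Reviewer note: the flow handlers below are reached when `expect_node()` sees `flow_style` set on the event (or an empty collection). From round-trip code the flag is set per node; a hedged example:

```python
# Sketch: forcing flow style on one sequence node; expect_node() then
# routes it to expect_flow_sequence() instead of the block handlers.
import sys
from pipenv.vendor.ruamel.yaml import YAML
from pipenv.vendor.ruamel.yaml.comments import CommentedSeq

yaml = YAML()
nums = CommentedSeq([1, 2, 3])
nums.fa.set_flow_style()   # the same per-node flag the constructor sets
yaml.dump({'nums': nums}, sys.stdout)
# expected output:
#   nums: [1, 2, 3]
```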
+ + def expect_flow_sequence(self, force_flow_indent=False): + # type: (Optional[bool]) -> None + if force_flow_indent: + self.increase_indent(flow=True, sequence=True) + ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column, + force_flow_indent) + self.write_indicator(' ' * ind + '[', True, whitespace=True) + if not force_flow_indent: + self.increase_indent(flow=True, sequence=True) + self.flow_context.append('[') + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + # type: () -> None + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped == '[' + self.write_indicator(']', False) + if self.event.comment and self.event.comment[0]: + # eol comment on empty flow sequence + self.write_post_comment(self.event) + elif self.flow_level == 0: + self.write_line_break() + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + # type: () -> None + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped == '[' + if self.canonical: + self.write_indicator(',', False) + self.write_indent() + self.write_indicator(']', False) + if self.event.comment and self.event.comment[0]: + # eol comment on flow sequence + self.write_post_comment(self.event) + else: + self.no_newline = False + self.state = self.states.pop() + else: + self.write_indicator(',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
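Reviewer note: flow mappings follow the same per-node pattern; a hedged example:

```python
# Sketch: a mapping carrying the flow flag is emitted through
# expect_flow_mapping() as an inline {…} mapping.
import sys
from pipenv.vendor.ruamel.yaml import YAML
from pipenv.vendor.ruamel.yaml.comments import CommentedMap

yaml = YAML()
m = CommentedMap(a=1, b=2)
m.fa.set_flow_style()
yaml.dump(m, sys.stdout)
# expected output: {a: 1, b: 2}
```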
+ + def expect_flow_mapping(self, single=False, force_flow_indent=False): + # type: (Optional[bool], Optional[bool]) -> None + if force_flow_indent: + self.increase_indent(flow=True, sequence=False) + ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column, + force_flow_indent) + map_init = '{' + if ( + single + and self.flow_level + and self.flow_context[-1] == '[' + and not self.canonical + and not self.brace_single_entry_mapping_in_flow_sequence + ): + # single map item with flow context, no curly braces necessary + map_init = '' + self.write_indicator(' ' * ind + map_init, True, whitespace=True) + self.flow_context.append(map_init) + if not force_flow_indent: + self.increase_indent(flow=True, sequence=False) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + # type: () -> None + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped == '{' # empty flow mapping + self.write_indicator('}', False) + if self.event.comment and self.event.comment[0]: + # eol comment on empty mapping + self.write_post_comment(self.event) + elif self.flow_level == 0: + self.write_line_break() + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator('?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + # type: () -> None + if isinstance(self.event, MappingEndEvent): + # if self.event.comment and self.event.comment[1]: + # self.write_pre_comment(self.event) + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped in ['{', ''] + if self.canonical: + self.write_indicator(',', False) + self.write_indent() + if popped != '': + self.write_indicator('}', False) + if self.event.comment and self.event.comment[0]: + # eol comment on flow mapping, never reached on empty mappings + self.write_post_comment(self.event) + else: + self.no_newline = False + self.state = self.states.pop() + else: + self.write_indicator(',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator('?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + # type: () -> None + self.write_indicator(self.prefixed_colon, False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + # type: () -> None + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(self.prefixed_colon, True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
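Reviewer note: the `sequence_dash_offset` and `best_sequence_indent` attributes consulted by the block handlers below come from the public `YAML.indent()` call; a hedged sketch of the mapping between the two:

```python
# Sketch: offset becomes Emitter.sequence_dash_offset (spaces before the
# dash); sequence becomes best_sequence_indent (total item indent).
import sys
from pipenv.vendor.ruamel.yaml import YAML

yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.dump({'deps': ['pip', 'safety']}, sys.stdout)
# expected output:
#   deps:
#     - pip
#     - safety
```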
+ + def expect_block_sequence(self): + # type: () -> None + if self.mapping_context: + indentless = not self.indention + else: + indentless = False + if not self.compact_seq_seq and self.column != 0: + self.write_line_break() + self.increase_indent(flow=False, sequence=True, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + # type: () -> Any + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + # type: (bool) -> None + if not first and isinstance(self.event, SequenceEndEvent): + if self.event.comment and self.event.comment[1]: + # final comments on a block list e.g. empty line + self.write_pre_comment(self.event) + self.indent = self.indents.pop() + self.state = self.states.pop() + self.no_newline = False + else: + if self.event.comment and self.event.comment[1]: + self.write_pre_comment(self.event) + nonl = self.no_newline if self.column == 0 else False + self.write_indent() + ind = self.sequence_dash_offset # if len(self.indents) > 1 else 0 + self.write_indicator(' ' * ind + '-', True, indention=True) + if nonl or self.sequence_dash_offset + 2 > self.best_sequence_indent: + self.no_newline = True + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + # type: () -> None + if not self.mapping_context and not (self.compact_seq_map or self.column == 0): + self.write_line_break() + self.increase_indent(flow=False, sequence=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + # type: () -> None + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + # type: (Any) -> None + if not first and isinstance(self.event, MappingEndEvent): + if self.event.comment and self.event.comment[1]: + # final comments from a doc + self.write_pre_comment(self.event) + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + if self.event.comment and self.event.comment[1]: + # final comments from a doc + self.write_pre_comment(self.event) + self.write_indent() + if self.check_simple_key(): + if not isinstance( + self.event, (SequenceStartEvent, MappingStartEvent) + ): # sequence keys + try: + if self.event.style == '?': + self.write_indicator('?', True, indention=True) + except AttributeError: # aliases have no style + pass + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + # test on style for alias in !!set + if isinstance(self.event, AliasEvent) and not self.event.style == '?': + self.stream.write(' ') + else: + self.write_indicator('?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + # type: () -> None + if getattr(self.event, 'style', None) != '?': + # prefix = '' + if self.indent == 0 and self.top_level_colon_align is not None: + # write non-prefixed colon + c = ' ' * (self.top_level_colon_align - self.column) + self.colon + else: + c = self.prefixed_colon + self.write_indicator(c, False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + # type: () -> None + self.write_indent() + self.write_indicator(self.prefixed_colon, True, indention=True) + self.states.append(self.expect_block_mapping_key) + 
self.expect_node(mapping=True) + + # Checkers. + + def check_empty_sequence(self): + # type: () -> bool + return ( + isinstance(self.event, SequenceStartEvent) + and bool(self.events) + and isinstance(self.events[0], SequenceEndEvent) + ) + + def check_empty_mapping(self): + # type: () -> bool + return ( + isinstance(self.event, MappingStartEvent) + and bool(self.events) + and isinstance(self.events[0], MappingEndEvent) + ) + + def check_empty_document(self): + # type: () -> bool + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return ( + isinstance(event, ScalarEvent) + and event.anchor is None + and event.tag is None + and event.implicit + and event.value == "" + ) + + def check_simple_key(self): + # type: () -> bool + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if ( + isinstance(self.event, (ScalarEvent, CollectionStartEvent)) + and self.event.tag is not None + ): + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return length < self.MAX_SIMPLE_KEY_LENGTH and ( + isinstance(self.event, AliasEvent) + or (isinstance(self.event, SequenceStartEvent) and self.event.flow_style is True) + or (isinstance(self.event, MappingStartEvent) and self.event.flow_style is True) + or ( + isinstance(self.event, ScalarEvent) + # if there is an explicit style for an empty string, it is a simple key + and not (self.analysis.empty and self.style and self.style not in '\'"') + and not self.analysis.multiline + ) + or self.check_empty_sequence() + or self.check_empty_mapping() + ) + + # Anchor, Tag, and Scalar processors. + + def process_anchor(self, indicator): + # type: (Any) -> bool + if self.event.anchor is None: + self.prepared_anchor = None + return False + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator + self.prepared_anchor, True) + # issue 288 + self.no_newline = False + self.prepared_anchor = None + return True + + def process_tag(self): + # type: () -> None + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ( + self.event.value == '' + and self.style == "'" + and tag == 'tag:yaml.org,2002:null' + and self.alt_null is not None + ): + self.event.value = self.alt_null + self.analysis = None + self.style = self.choose_scalar_style() + if (not self.canonical or tag is None) and ( + (self.style == "" and self.event.implicit[0]) + or (self.style != "" and self.event.implicit[1]) + ): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = '!' 
+ self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError('tag is not specified') + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + if ( + self.sequence_context + and not self.flow_level + and isinstance(self.event, ScalarEvent) + ): + self.no_newline = True + self.prepared_tag = None + + def choose_scalar_style(self): + # type: () -> Any + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if (not self.event.style or self.event.style == '?') and ( + self.event.implicit[0] or not self.event.implicit[2] + ): + if not ( + self.simple_key_context and (self.analysis.empty or self.analysis.multiline) + ) and ( + self.flow_level + and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain) + ): + return "" + self.analysis.allow_block = True + if self.event.style and self.event.style in '|>': + if ( + not self.flow_level + and not self.simple_key_context + and self.analysis.allow_block + ): + return self.event.style + if not self.event.style and self.analysis.allow_double_quoted: + if "'" in self.event.value or '\n' in self.event.value: + return '"' + if not self.event.style or self.event.style == "'": + if self.analysis.allow_single_quoted and not ( + self.simple_key_context and self.analysis.multiline + ): + return "'" + return '"' + + def process_scalar(self): + # type: () -> None + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = not self.simple_key_context + # if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + # nprint('xx', self.sequence_context, self.flow_level) + if self.sequence_context and not self.flow_level: + self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == "'": + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + if ( + self.event.comment + and self.event.comment[0] + and self.event.comment[0].column >= self.indent + ): + # comment following a folded scalar must dedent (issue 376) + self.event.comment[0].column = self.indent - 1 # type: ignore + elif self.style == '|': + # self.write_literal(self.analysis.scalar, self.event.comment) + try: + cmx = self.event.comment[1][0] + except (IndexError, TypeError): + cmx = "" + self.write_literal(self.analysis.scalar, cmx) + if ( + self.event.comment + and self.event.comment[0] + and self.event.comment[0].column >= self.indent + ): + # comment following a literal scalar must dedent (issue 376) + self.event.comment[0].column = self.indent - 1 # type: ignore + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + if self.event.comment: + self.write_post_comment(self.event) + + # Analyzers. 
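Reviewer note: `analyze_scalar()` below computes which styles a value may legally take, and `choose_scalar_style()` above picks among them. A hedged example of the observable effect:

```python
# Sketch: values containing ': ' or starting with an indicator character
# lose plain style in analyze_scalar() and fall back to single quotes.
import sys
from pipenv.vendor.ruamel.yaml import YAML

yaml = YAML()
yaml.dump({'cmd': 'echo: done', 'glob': '*.py'}, sys.stdout)
# expected output:
#   cmd: 'echo: done'
#   glob: '*.py'
```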
+ + def prepare_version(self, version): + # type: (Any) -> Any + major, minor = version + if major != 1: + raise EmitterError( + _F('unsupported YAML version: {major:d}.{minor:d}', major=major, minor=minor) + ) + return _F('{major:d}.{minor:d}', major=major, minor=minor) + + def prepare_tag_handle(self, handle): + # type: (Any) -> Any + if not handle: + raise EmitterError('tag handle must not be empty') + if handle[0] != '!' or handle[-1] != '!': + raise EmitterError( + _F("tag handle must start and end with '!': {handle!r}", handle=handle) + ) + for ch in handle[1:-1]: + if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_'): + raise EmitterError( + _F( + 'invalid character {ch!r} in the tag handle: {handle!r}', + ch=ch, + handle=handle, + ) + ) + return handle + + def prepare_tag_prefix(self, prefix): + # type: (Any) -> Any + if not prefix: + raise EmitterError('tag prefix must not be empty') + chunks = [] # type: List[Any] + start = end = 0 + if prefix[0] == '!': + end = 1 + ch_set = "-;/?:@&=+$,_.~*'()[]" + if self.dumper: + version = getattr(self.dumper, 'version', (1, 2)) + if version is None or version >= (1, 2): + ch_set += '#' + while end < len(prefix): + ch = prefix[end] + if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in ch_set: + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end + 1 + data = ch + for ch in data: + chunks.append(_F('%{ord_ch:02X}', ord_ch=ord(ch))) + if start < end: + chunks.append(prefix[start:end]) + return "".join(chunks) + + def prepare_tag(self, tag): + # type: (Any) -> Any + if not tag: + raise EmitterError('tag must not be empty') + if tag == '!': + return tag + handle = None + suffix = tag + prefixes = sorted(self.tag_prefixes.keys()) + for prefix in prefixes: + if tag.startswith(prefix) and (prefix == '!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix) :] + chunks = [] # type: List[Any] + start = end = 0 + ch_set = "-;/?:@&=+$,_.~*'()[]" + if self.dumper: + version = getattr(self.dumper, 'version', (1, 2)) + if version is None or version >= (1, 2): + ch_set += '#' + while end < len(suffix): + ch = suffix[end] + if ( + '0' <= ch <= '9' + or 'A' <= ch <= 'Z' + or 'a' <= ch <= 'z' + or ch in ch_set + or (ch == '!' and handle != '!') + ): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end + 1 + data = ch + for ch in data: + chunks.append(_F('%{ord_ch:02X}', ord_ch=ord(ch))) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = "".join(chunks) + if handle: + return _F('{handle!s}{suffix_text!s}', handle=handle, suffix_text=suffix_text) + else: + return _F('!<{suffix_text!s}>', suffix_text=suffix_text) + + def prepare_anchor(self, anchor): + # type: (Any) -> Any + if not anchor: + raise EmitterError('anchor must not be empty') + for ch in anchor: + if not check_anchorname_char(ch): + raise EmitterError( + _F( + 'invalid character {ch!r} in the anchor: {anchor!r}', + ch=ch, + anchor=anchor, + ) + ) + return anchor + + def analyze_scalar(self, scalar): + # type: (Any) -> Any + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis( + scalar=scalar, + empty=True, + multiline=False, + allow_flow_plain=False, + allow_block_plain=True, + allow_single_quoted=True, + allow_double_quoted=True, + allow_block=False, + ) + + # Indicators and special characters. 
+ block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith('---') or scalar.startswith('...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = len(scalar) == 1 or scalar[1] in '\0 \t\r\n\x85\u2028\u2029' + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. + if ch in '#,[]{}&*!|>\'"%@`': + flow_indicators = True + block_indicators = True + if ch in '?:': # ToDo + if self.serializer.use_version == (1, 1): + flow_indicators = True + elif len(scalar) == 1: # single character + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == '-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in ',[]{}': # http://yaml.org/spec/1.2/spec.html#id2788859 + flow_indicators = True + if ch == '?' and self.serializer.use_version == (1, 1): + flow_indicators = True + if ch == ':': + if followed_by_whitespace: + flow_indicators = True + block_indicators = True + if ch == '#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in '\n\x85\u2028\u2029': + line_breaks = True + if not (ch == '\n' or '\x20' <= ch <= '\x7E'): + if ( + ch == '\x85' + or '\xA0' <= ch <= '\uD7FF' + or '\uE000' <= ch <= '\uFFFD' + or (self.unicode_supplementary and ('\U00010000' <= ch <= '\U0010FFFF')) + ) and ch != '\uFEFF': + # unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == ' ': + if index == 0: + leading_space = True + if index == len(scalar) - 1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in '\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar) - 1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = ch in '\0 \t\r\n\x85\u2028\u2029' + followed_by_whitespace = ( + index + 1 >= len(scalar) or scalar[index + 1] in '\0 \t\r\n\x85\u2028\u2029' + ) + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. + if leading_space or leading_break or trailing_space or trailing_break: + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. 
+ if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if special_characters: + allow_flow_plain = allow_block_plain = allow_single_quoted = allow_block = False + elif space_break: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + if not self.allow_space_break: + allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis( + scalar=scalar, + empty=False, + multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block, + ) + + # Writers. + + def flush_stream(self): + # type: () -> None + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # type: () -> None + # Write BOM if needed. + if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write('\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + # type: () -> None + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, whitespace=False, indention=False): + # type: (Any, Any, bool, bool) -> None + if self.whitespace or not need_whitespace: + data = indicator + else: + data = ' ' + indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + # type: () -> None + indent = self.indent or 0 + if ( + not self.indention + or self.column > indent + or (self.column == indent and not self.whitespace) + ): + if bool(self.no_newline): + self.no_newline = False + else: + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = ' ' * (indent - self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) # type: ignore + self.stream.write(data) + + def write_line_break(self, data=None): + # type: (Any) -> None + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + # type: (Any) -> None + data = _F('%YAML {version_text!s}', version_text=version_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + # type: (Any, Any) -> None + data = _F( + '%TAG {handle_text!s} {prefix_text!s}', + handle_text=handle_text, + prefix_text=prefix_text, + ) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
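Reviewer note: the writers below render each chosen style. From user code the block styles are requested with the `scalarstring` wrappers; a hedged example for the literal writer:

```python
# Sketch: LiteralScalarString requests style '|', which process_scalar()
# hands to write_literal(); determine_block_hints() adds indentation and
# chomping hints only when the text needs them.
import sys
from pipenv.vendor.ruamel.yaml import YAML
from pipenv.vendor.ruamel.yaml.scalarstring import LiteralScalarString

yaml = YAML()
yaml.dump({'script': LiteralScalarString('set -e\npipenv check\n')}, sys.stdout)
# expected output:
#   script: |
#     set -e
#     pipenv check
```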
+ + def write_single_quoted(self, text, split=True): + # type: (Any, Any) -> None + if self.root_context: + if self.requested_indent is not None: + self.write_line_break() + if self.requested_indent != 0: + self.write_indent() + self.write_indicator("'", True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != ' ': + if ( + start + 1 == end + and self.column > self.best_width + and split + and start != 0 + and end != len(text) + ): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in '\n\x85\u2028\u2029': + if text[start] == '\n': + self.write_line_break() + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in ' \n\x85\u2028\u2029' or ch == "'": + if start < end: + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == "'": + data = "''" + self.column += 2 + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = ch == ' ' + breaks = ch in '\n\x85\u2028\u2029' + end += 1 + self.write_indicator("'", False) + + ESCAPE_REPLACEMENTS = { + '\0': '0', + '\x07': 'a', + '\x08': 'b', + '\x09': 't', + '\x0A': 'n', + '\x0B': 'v', + '\x0C': 'f', + '\x0D': 'r', + '\x1B': 'e', + '"': '"', + '\\': '\\', + '\x85': 'N', + '\xA0': '_', + '\u2028': 'L', + '\u2029': 'P', + } + + def write_double_quoted(self, text, split=True): + # type: (Any, Any) -> None + if self.root_context: + if self.requested_indent is not None: + self.write_line_break() + if self.requested_indent != 0: + self.write_indent() + self.write_indicator('"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ( + ch is None + or ch in '"\\\x85\u2028\u2029\uFEFF' + or not ( + '\x20' <= ch <= '\x7E' + or ( + self.allow_unicode + and ('\xA0' <= ch <= '\uD7FF' or '\uE000' <= ch <= '\uFFFD') + ) + ) + ): + if start < end: + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = '\\' + self.ESCAPE_REPLACEMENTS[ch] + elif ch <= '\xFF': + data = _F('\\x{ord_ch:02X}', ord_ch=ord(ch)) + elif ch <= '\uFFFF': + data = _F('\\u{ord_ch:04X}', ord_ch=ord(ch)) + else: + data = _F('\\U{ord_ch:08X}', ord_ch=ord(ch)) + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ( + 0 < end < len(text) - 1 + and (ch == ' ' or start >= end) + and self.column + (end - start) > self.best_width + and split + ): + data = text[start:end] + '\\' + if start < end: + start = end + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == ' ': + data = '\\' + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator('"', False) + + def determine_block_hints(self, 
text): + # type: (Any) -> Any + indent = 0 + indicator = '' + hints = '' + if text: + if text[0] in ' \n\x85\u2028\u2029': + indent = self.best_sequence_indent + hints += str(indent) + elif self.root_context: + for end in ['\n---', '\n...']: + pos = 0 + while True: + pos = text.find(end, pos) + if pos == -1: + break + try: + if text[pos + 4] in ' \r\n': + break + except IndexError: + pass + pos += 1 + if pos > -1: + break + if pos > 0: + indent = self.best_sequence_indent + if text[-1] not in '\n\x85\u2028\u2029': + indicator = '-' + elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029': + indicator = '+' + hints += indicator + return hints, indent, indicator + + def write_folded(self, text): + # type: (Any) -> None + hints, _indent, _indicator = self.determine_block_hints(text) + self.write_indicator('>' + hints, True) + if _indicator == '+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in '\n\x85\u2028\u2029\a': + if ( + not leading_space + and ch is not None + and ch != ' ' + and text[start] == '\n' + ): + self.write_line_break() + leading_space = ch == ' ' + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != ' ': + if start + 1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in ' \n\x85\u2028\u2029\a': + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + if ch == '\a': + if end < (len(text) - 1) and not text[end + 2].isspace(): + self.write_line_break() + self.write_indent() + end += 2 # \a and the space that is inserted on the fold + else: + raise EmitterError('unexcpected fold indicator \\a before space') + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = ch in '\n\x85\u2028\u2029' + spaces = ch == ' ' + end += 1 + + def write_literal(self, text, comment=None): + # type: (Any, Any) -> None + hints, _indent, _indicator = self.determine_block_hints(text) + # if comment is not None: + # try: + # hints += comment[1][0] + # except (TypeError, IndexError) as e: + # pass + if not isinstance(comment, str): + comment = '' + self.write_indicator('|' + hints + comment, True) + # try: + # nprintf('selfev', comment) + # cmx = comment[1][0] + # if cmx: + # self.stream.write(cmx) + # except (TypeError, IndexError) as e: + # pass + if _indicator == '+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in '\n\x85\u2028\u2029': + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + if self.root_context: + idnx = self.indent if self.indent is not None else 0 + self.stream.write(' ' * (_indent + idnx)) + else: + self.write_indent() + start = end + else: + if ch is None or ch in '\n\x85\u2028\u2029': + data = text[start:end] + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + if ch is 
None: + self.write_line_break() + start = end + if ch is not None: + breaks = ch in '\n\x85\u2028\u2029' + end += 1 + + def write_plain(self, text, split=True): + # type: (Any, Any) -> None + if self.root_context: + if self.requested_indent is not None: + self.write_line_break() + if self.requested_indent != 0: + self.write_indent() + else: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = ' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) # type: ignore + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch != ' ': + if start + 1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) # type: ignore + self.stream.write(data) + start = end + elif breaks: + if ch not in '\n\x85\u2028\u2029': # type: ignore + if text[start] == '\n': + self.write_line_break() + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in ' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) # type: ignore + try: + self.stream.write(data) + except: # NOQA + sys.stdout.write(repr(data) + '\n') + raise + start = end + if ch is not None: + spaces = ch == ' ' + breaks = ch in '\n\x85\u2028\u2029' + end += 1 + + def write_comment(self, comment, pre=False): + # type: (Any, bool) -> None + value = comment.value + # nprintf('{:02d} {:02d} {!r}'.format(self.column, comment.start_mark.column, value)) + if not pre and value[-1] == '\n': + value = value[:-1] + try: + # get original column position + col = comment.start_mark.column + if comment.value and comment.value.startswith('\n'): + # never inject extra spaces if the comment starts with a newline + # and not a real comment (e.g. 
if you have an empty line following a key-value + col = self.column + elif col < self.column + 1: + ValueError + except ValueError: + col = self.column + 1 + # nprint('post_comment', self.line, self.column, value) + try: + # at least one space if the current column >= the start column of the comment + # but not at the start of a line + nr_spaces = col - self.column + if self.column and value.strip() and nr_spaces < 1 and value[0] != '\n': + nr_spaces = 1 + value = ' ' * nr_spaces + value + try: + if bool(self.encoding): + value = value.encode(self.encoding) + except UnicodeDecodeError: + pass + self.stream.write(value) + except TypeError: + raise + if not pre: + self.write_line_break() + + def write_pre_comment(self, event): + # type: (Any) -> bool + comments = event.comment[1] + if comments is None: + return False + try: + start_events = (MappingStartEvent, SequenceStartEvent) + for comment in comments: + if isinstance(event, start_events) and getattr(comment, 'pre_done', None): + continue + if self.column != 0: + self.write_line_break() + self.write_comment(comment, pre=True) + if isinstance(event, start_events): + comment.pre_done = True + except TypeError: + sys.stdout.write('eventtt {} {}'.format(type(event), event)) + raise + return True + + def write_post_comment(self, event): + # type: (Any) -> bool + if self.event.comment[0] is None: + return False + comment = event.comment[0] + self.write_comment(comment) + return True diff --git a/pipenv/vendor/ruamel/yaml/error.py b/pipenv/vendor/ruamel/yaml/error.py new file mode 100644 index 0000000000..4a1a938c77 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/error.py @@ -0,0 +1,332 @@ +# coding: utf-8 + +import warnings +import textwrap + +from pipenv.vendor.ruamel.yaml.compat import _F + +if False: # MYPY + from typing import Any, Dict, Optional, List, Text # NOQA + + +__all__ = [ + 'FileMark', + 'StringMark', + 'CommentMark', + 'YAMLError', + 'MarkedYAMLError', + 'ReusedAnchorWarning', + 'UnsafeLoaderWarning', + 'MarkedYAMLWarning', + 'MarkedYAMLFutureWarning', +] + + +class StreamMark: + __slots__ = 'name', 'index', 'line', 'column' + + def __init__(self, name, index, line, column): + # type: (Any, int, int, int) -> None + self.name = name + self.index = index + self.line = line + self.column = column + + def __str__(self): + # type: () -> Any + where = _F( + ' in "{sname!s}", line {sline1:d}, column {scolumn1:d}', + sname=self.name, + sline1=self.line + 1, + scolumn1=self.column + 1, + ) + return where + + def __eq__(self, other): + # type: (Any) -> bool + if self.line != other.line or self.column != other.column: + return False + if self.name != other.name or self.index != other.index: + return False + return True + + def __ne__(self, other): + # type: (Any) -> bool + return not self.__eq__(other) + + +class FileMark(StreamMark): + __slots__ = () + + +class StringMark(StreamMark): + __slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer' + + def __init__(self, name, index, line, column, buffer, pointer): + # type: (Any, int, int, int, Any, Any) -> None + StreamMark.__init__(self, name, index, line, column) + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + # type: (int, int) -> Any + if self.buffer is None: # always False + return None + head = "" + start = self.pointer + while start > 0 and self.buffer[start - 1] not in '\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer - start > max_length / 2 - 1: + head = ' ... 
' + start += 5 + break + tail = "" + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029': + end += 1 + if end - self.pointer > max_length / 2 - 1: + tail = ' ... ' + end -= 5 + break + snippet = self.buffer[start:end] + caret = '^' + caret = '^ (line: {})'.format(self.line + 1) + return ( + ' ' * indent + + head + + snippet + + tail + + '\n' + + ' ' * (indent + self.pointer - start + len(head)) + + caret + ) + + def __str__(self): + # type: () -> Any + snippet = self.get_snippet() + where = _F( + ' in "{sname!s}", line {sline1:d}, column {scolumn1:d}', + sname=self.name, + sline1=self.line + 1, + scolumn1=self.column + 1, + ) + if snippet is not None: + where += ':\n' + snippet + return where + + def __repr__(self): + # type: () -> Any + snippet = self.get_snippet() + where = _F( + ' in "{sname!s}", line {sline1:d}, column {scolumn1:d}', + sname=self.name, + sline1=self.line + 1, + scolumn1=self.column + 1, + ) + if snippet is not None: + where += ':\n' + snippet + return where + + +class CommentMark: + __slots__ = ('column',) + + def __init__(self, column): + # type: (Any) -> None + self.column = column + + +class YAMLError(Exception): + pass + + +class MarkedYAMLError(YAMLError): + def __init__( + self, + context=None, + context_mark=None, + problem=None, + problem_mark=None, + note=None, + warn=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + # warn is ignored + + def __str__(self): + # type: () -> Any + lines = [] # type: List[str] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None and ( + self.problem is None + or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column + ): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None and self.note: + note = textwrap.dedent(self.note) + lines.append(note) + return '\n'.join(lines) + + +class YAMLStreamError(Exception): + pass + + +class YAMLWarning(Warning): + pass + + +class MarkedYAMLWarning(YAMLWarning): + def __init__( + self, + context=None, + context_mark=None, + problem=None, + problem_mark=None, + note=None, + warn=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + self.warn = warn + + def __str__(self): + # type: () -> Any + lines = [] # type: List[str] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None and ( + self.problem is None + or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column + ): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None and self.note: + note = textwrap.dedent(self.note) + lines.append(note) + if self.warn is not None and self.warn: + warn = textwrap.dedent(self.warn) + lines.append(warn) + return '\n'.join(lines) + + 
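For context, the mark classes and MarkedYAMLError.__str__ above are what
assemble the caret-annotated snippets ruamel.yaml shows on malformed input.
A minimal sketch of how that surfaces, assuming a standalone ruamel.yaml
install (inside pipenv the vendored import path pipenv.vendor.ruamel.yaml
would be used instead):

    from ruamel.yaml import YAML
    from ruamel.yaml.error import MarkedYAMLError

    yaml = YAML(typ='safe', pure=True)
    try:
        yaml.load('key: [1, 2')  # unterminated flow sequence
    except MarkedYAMLError as exc:
        # __str__ joins context, context_mark, problem and problem_mark;
        # StringMark.get_snippet() supplies the source line and the
        # '^ (line: N)' caret underneath it.
        print(exc)
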
+class ReusedAnchorWarning(YAMLWarning): + pass + + +class UnsafeLoaderWarning(YAMLWarning): + text = """ +The default 'Loader' for 'load(stream)' without further arguments can be unsafe. +Use 'load(stream, Loader=ruamel.yaml.Loader)' explicitly if that is OK. +Alternatively include the following in your code: + + import warnings + warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning) + +In most other cases you should consider using 'safe_load(stream)'""" + pass + + +warnings.simplefilter('once', UnsafeLoaderWarning) + + +class MantissaNoDotYAML1_1Warning(YAMLWarning): + def __init__(self, node, flt_str): + # type: (Any, Any) -> None + self.node = node + self.flt = flt_str + + def __str__(self): + # type: () -> Any + line = self.node.start_mark.line + col = self.node.start_mark.column + return """ +In YAML 1.1 floating point values should have a dot ('.') in their mantissa. +See the Floating-Point Language-Independent Type for YAML™ Version 1.1 specification +( http://yaml.org/type/float.html ). This dot is not required for JSON nor for YAML 1.2 + +Correct your float: "{}" on line: {}, column: {} + +or alternatively include the following in your code: + + import warnings + warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning) + +""".format( + self.flt, line, col + ) + + +warnings.simplefilter('once', MantissaNoDotYAML1_1Warning) + + +class YAMLFutureWarning(Warning): + pass + + +class MarkedYAMLFutureWarning(YAMLFutureWarning): + def __init__( + self, + context=None, + context_mark=None, + problem=None, + problem_mark=None, + note=None, + warn=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + self.warn = warn + + def __str__(self): + # type: () -> Any + lines = [] # type: List[str] + if self.context is not None: + lines.append(self.context) + + if self.context_mark is not None and ( + self.problem is None + or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column + ): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None and self.note: + note = textwrap.dedent(self.note) + lines.append(note) + if self.warn is not None and self.warn: + warn = textwrap.dedent(self.warn) + lines.append(warn) + return '\n'.join(lines) diff --git a/pipenv/vendor/ruamel/yaml/events.py b/pipenv/vendor/ruamel/yaml/events.py new file mode 100644 index 0000000000..486c58e8c4 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/events.py @@ -0,0 +1,196 @@ +# coding: utf-8 + +from pipenv.vendor.ruamel.yaml.compat import _F + +# Abstract classes. 
+ +if False: # MYPY + from typing import Any, Dict, Optional, List # NOQA + +SHOW_LINES = False + + +def CommentCheck(): + # type: () -> None + pass + + +class Event: + __slots__ = 'start_mark', 'end_mark', 'comment' + + def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck): + # type: (Any, Any, Any) -> None + self.start_mark = start_mark + self.end_mark = end_mark + # assert comment is not CommentCheck + if comment is CommentCheck: + comment = None + self.comment = comment + + def __repr__(self): + # type: () -> Any + if True: + arguments = [] + if hasattr(self, 'value'): + # if you use repr(getattr(self, 'value')) then flake8 complains about + # abuse of getattr with a constant. When you change to self.value + # then mypy throws an error + arguments.append(repr(self.value)) # type: ignore + for key in ['anchor', 'tag', 'implicit', 'flow_style', 'style']: + v = getattr(self, key, None) + if v is not None: + arguments.append(_F('{key!s}={v!r}', key=key, v=v)) + if self.comment not in [None, CommentCheck]: + arguments.append('comment={!r}'.format(self.comment)) + if SHOW_LINES: + arguments.append( + '({}:{}/{}:{})'.format( + self.start_mark.line, + self.start_mark.column, + self.end_mark.line, + self.end_mark.column, + ) + ) + arguments = ', '.join(arguments) # type: ignore + else: + attributes = [ + key + for key in ['anchor', 'tag', 'implicit', 'value', 'flow_style', 'style'] + if hasattr(self, key) + ] + arguments = ', '.join( + [_F('{k!s}={attr!r}', k=key, attr=getattr(self, key)) for key in attributes] + ) + if self.comment not in [None, CommentCheck]: + arguments += ', comment={!r}'.format(self.comment) + return _F( + '{self_class_name!s}({arguments!s})', + self_class_name=self.__class__.__name__, + arguments=arguments, + ) + + +class NodeEvent(Event): + __slots__ = ('anchor',) + + def __init__(self, anchor, start_mark=None, end_mark=None, comment=None): + # type: (Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.anchor = anchor + + +class CollectionStartEvent(NodeEvent): + __slots__ = 'tag', 'implicit', 'flow_style', 'nr_items' + + def __init__( + self, + anchor, + tag, + implicit, + start_mark=None, + end_mark=None, + flow_style=None, + comment=None, + nr_items=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any, Optional[int]) -> None + NodeEvent.__init__(self, anchor, start_mark, end_mark, comment) + self.tag = tag + self.implicit = implicit + self.flow_style = flow_style + self.nr_items = nr_items + + +class CollectionEndEvent(Event): + __slots__ = () + + +# Implementations. 
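Before the concrete event implementations that follow, a minimal sketch of
how these classes surface through the streaming API (again assuming a
standalone ruamel.yaml install rather than the vendored
pipenv.vendor.ruamel.yaml path):

    from ruamel.yaml import YAML

    yaml = YAML(typ='safe', pure=True)
    for event in yaml.parse('a: 1\n'):
        # Prints StreamStartEvent, DocumentStartEvent, MappingStartEvent,
        # two ScalarEvents (for 'a' and '1'), MappingEndEvent,
        # DocumentEndEvent and StreamEndEvent, rendered via the
        # Event.__repr__ defined above.
        print(event)
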
+ + +class StreamStartEvent(Event): + __slots__ = ('encoding',) + + def __init__(self, start_mark=None, end_mark=None, encoding=None, comment=None): + # type: (Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.encoding = encoding + + +class StreamEndEvent(Event): + __slots__ = () + + +class DocumentStartEvent(Event): + __slots__ = 'explicit', 'version', 'tags' + + def __init__( + self, + start_mark=None, + end_mark=None, + explicit=None, + version=None, + tags=None, + comment=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.explicit = explicit + self.version = version + self.tags = tags + + +class DocumentEndEvent(Event): + __slots__ = ('explicit',) + + def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None): + # type: (Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.explicit = explicit + + +class AliasEvent(NodeEvent): + __slots__ = 'style' + + def __init__(self, anchor, start_mark=None, end_mark=None, style=None, comment=None): + # type: (Any, Any, Any, Any, Any) -> None + NodeEvent.__init__(self, anchor, start_mark, end_mark, comment) + self.style = style + + +class ScalarEvent(NodeEvent): + __slots__ = 'tag', 'implicit', 'value', 'style' + + def __init__( + self, + anchor, + tag, + implicit, + value, + start_mark=None, + end_mark=None, + style=None, + comment=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None + NodeEvent.__init__(self, anchor, start_mark, end_mark, comment) + self.tag = tag + self.implicit = implicit + self.value = value + self.style = style + + +class SequenceStartEvent(CollectionStartEvent): + __slots__ = () + + +class SequenceEndEvent(CollectionEndEvent): + __slots__ = () + + +class MappingStartEvent(CollectionStartEvent): + __slots__ = () + + +class MappingEndEvent(CollectionEndEvent): + __slots__ = () diff --git a/pipenv/vendor/ruamel/yaml/loader.py b/pipenv/vendor/ruamel/yaml/loader.py new file mode 100644 index 0000000000..b2594bbd61 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/loader.py @@ -0,0 +1,75 @@ +# coding: utf-8 + +from pipenv.vendor.ruamel.yaml.reader import Reader +from pipenv.vendor.ruamel.yaml.scanner import Scanner, RoundTripScanner +from pipenv.vendor.ruamel.yaml.parser import Parser, RoundTripParser +from pipenv.vendor.ruamel.yaml.composer import Composer +from pipenv.vendor.ruamel.yaml.constructor import ( + BaseConstructor, + SafeConstructor, + Constructor, + RoundTripConstructor, +) +from pipenv.vendor.ruamel.yaml.resolver import VersionedResolver + +if False: # MYPY + from typing import Any, Dict, List, Union, Optional # NOQA + from pipenv.vendor.ruamel.yaml.compat import StreamTextType, VersionType # NOQA + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader'] + + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + self.comment_handling = None + Reader.__init__(self, stream, loader=self) + Scanner.__init__(self, loader=self) + Parser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + BaseConstructor.__init__(self, loader=self) + VersionedResolver.__init__(self, version, loader=self) + + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver): + def __init__(self, stream, version=None, preserve_quotes=None): 
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + self.comment_handling = None + Reader.__init__(self, stream, loader=self) + Scanner.__init__(self, loader=self) + Parser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + SafeConstructor.__init__(self, loader=self) + VersionedResolver.__init__(self, version, loader=self) + + +class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + self.comment_handling = None + Reader.__init__(self, stream, loader=self) + Scanner.__init__(self, loader=self) + Parser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + Constructor.__init__(self, loader=self) + VersionedResolver.__init__(self, version, loader=self) + + +class RoundTripLoader( + Reader, + RoundTripScanner, + RoundTripParser, + Composer, + RoundTripConstructor, + VersionedResolver, +): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + # self.reader = Reader.__init__(self, stream) + self.comment_handling = None # issue 385 + Reader.__init__(self, stream, loader=self) + RoundTripScanner.__init__(self, loader=self) + RoundTripParser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + RoundTripConstructor.__init__(self, preserve_quotes=preserve_quotes, loader=self) + VersionedResolver.__init__(self, version, loader=self) diff --git a/pipenv/vendor/ruamel/yaml/main.py b/pipenv/vendor/ruamel/yaml/main.py new file mode 100644 index 0000000000..7a6f6f3cbb --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/main.py @@ -0,0 +1,1667 @@ +# coding: utf-8 + +import sys +import os +import warnings +import glob +from importlib import import_module + + +import ruamel.yaml +from pipenv.vendor.ruamel.yaml.error import UnsafeLoaderWarning, YAMLError # NOQA + +from pipenv.vendor.ruamel.yaml.tokens import * # NOQA +from pipenv.vendor.ruamel.yaml.events import * # NOQA +from pipenv.vendor.ruamel.yaml.nodes import * # NOQA + +from pipenv.vendor.ruamel.yaml.loader import BaseLoader, SafeLoader, Loader, RoundTripLoader # NOQA +from pipenv.vendor.ruamel.yaml.dumper import BaseDumper, SafeDumper, Dumper, RoundTripDumper # NOQA +from pipenv.vendor.ruamel.yaml.compat import StringIO, BytesIO, with_metaclass, nprint, nprintf # NOQA +from pipenv.vendor.ruamel.yaml.resolver import VersionedResolver, Resolver # NOQA +from pipenv.vendor.ruamel.yaml.representer import ( + BaseRepresenter, + SafeRepresenter, + Representer, + RoundTripRepresenter, +) +from pipenv.vendor.ruamel.yaml.constructor import ( + BaseConstructor, + SafeConstructor, + Constructor, + RoundTripConstructor, +) +from pipenv.vendor.ruamel.yaml.loader import Loader as UnsafeLoader +from pipenv.vendor.ruamel.yaml.comments import CommentedMap, CommentedSeq, C_PRE + +if False: # MYPY + from typing import List, Set, Dict, Union, Any, Callable, Optional, Text # NOQA + from pipenv.vendor.ruamel.yaml.compat import StreamType, StreamTextType, VersionType # NOQA + from pathlib import Path + +try: + from _ruamel_yaml import CParser, CEmitter # type: ignore +except: # NOQA + CParser = CEmitter = None + +# import io + + +# YAML is an acronym, i.e. spoken: rhymes with "camel". 
And thus a +# subset of abbreviations, which should be all caps according to PEP8 + + +class YAML: + def __init__(self, *, typ=None, pure=False, output=None, plug_ins=None): # input=None, + # type: (Any, Optional[Text], Any, Any, Any) -> None + """ + typ: 'rt'/None -> RoundTripLoader/RoundTripDumper, (default) + 'safe' -> SafeLoader/SafeDumper, + 'unsafe' -> normal/unsafe Loader/Dumper + 'base' -> baseloader + pure: if True only use Python modules + input/output: needed to work as context manager + plug_ins: a list of plug-in files + """ + + self.typ = ['rt'] if typ is None else (typ if isinstance(typ, list) else [typ]) + self.pure = pure + + # self._input = input + self._output = output + self._context_manager = None # type: Any + + self.plug_ins = [] # type: List[Any] + for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins(): + file_name = pu.replace(os.sep, '.') + self.plug_ins.append(import_module(file_name)) + self.Resolver = ruamel.yaml.resolver.VersionedResolver # type: Any + self.allow_unicode = True + self.Reader = None # type: Any + self.Representer = None # type: Any + self.Constructor = None # type: Any + self.Scanner = None # type: Any + self.Serializer = None # type: Any + self.default_flow_style = None # type: Any + self.comment_handling = None + typ_found = 1 + setup_rt = False + if 'rt' in self.typ: + setup_rt = True + elif 'safe' in self.typ: + self.Emitter = ( + ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter + ) + self.Representer = ruamel.yaml.representer.SafeRepresenter + self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.SafeConstructor + elif 'base' in self.typ: + self.Emitter = ruamel.yaml.emitter.Emitter + self.Representer = ruamel.yaml.representer.BaseRepresenter + self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.BaseConstructor + elif 'unsafe' in self.typ: + self.Emitter = ( + ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter + ) + self.Representer = ruamel.yaml.representer.Representer + self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.Constructor + elif 'rtsc' in self.typ: + self.default_flow_style = False + # no optimized rt-dumper yet + self.Emitter = ruamel.yaml.emitter.Emitter + self.Serializer = ruamel.yaml.serializer.Serializer + self.Representer = ruamel.yaml.representer.RoundTripRepresenter + self.Scanner = ruamel.yaml.scanner.RoundTripScannerSC + # no optimized rt-parser yet + self.Parser = ruamel.yaml.parser.RoundTripParserSC + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.RoundTripConstructor + self.comment_handling = C_PRE + else: + setup_rt = True + typ_found = 0 + if setup_rt: + self.default_flow_style = False + # no optimized rt-dumper yet + self.Emitter = ruamel.yaml.emitter.Emitter + self.Serializer = ruamel.yaml.serializer.Serializer + self.Representer = ruamel.yaml.representer.RoundTripRepresenter + self.Scanner = ruamel.yaml.scanner.RoundTripScanner + # no optimized rt-parser yet + self.Parser = ruamel.yaml.parser.RoundTripParser + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.RoundTripConstructor + del setup_rt + self.stream = 
None + self.canonical = None + self.old_indent = None + self.width = None + self.line_break = None + + self.map_indent = None + self.sequence_indent = None + self.sequence_dash_offset = 0 + self.compact_seq_seq = None + self.compact_seq_map = None + self.sort_base_mapping_type_on_output = None # default: sort + + self.top_level_colon_align = None + self.prefix_colon = None + self.version = None + self.preserve_quotes = None + self.allow_duplicate_keys = False # duplicate keys in map, set + self.encoding = 'utf-8' + self.explicit_start = None + self.explicit_end = None + self.tags = None + self.default_style = None + self.top_level_block_style_scalar_no_indent_error_1_1 = False + # directives end indicator with single scalar document + self.scalar_after_indicator = None + # [a, b: 1, c: {d: 2}] vs. [a, {b: 1}, {c: {d: 2}}] + self.brace_single_entry_mapping_in_flow_sequence = False + for module in self.plug_ins: + if getattr(module, 'typ', None) in self.typ: + typ_found += 1 + module.init_typ(self) + break + if typ_found == 0: + raise NotImplementedError( + 'typ "{}"not recognised (need to install plug-in?)'.format(self.typ) + ) + + @property + def reader(self): + # type: () -> Any + try: + return self._reader # type: ignore + except AttributeError: + self._reader = self.Reader(None, loader=self) + return self._reader + + @property + def scanner(self): + # type: () -> Any + try: + return self._scanner # type: ignore + except AttributeError: + self._scanner = self.Scanner(loader=self) + return self._scanner + + @property + def parser(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + if self.Parser is not CParser: + setattr(self, attr, self.Parser(loader=self)) + else: + if getattr(self, '_stream', None) is None: + # wait for the stream + return None + else: + # if not hasattr(self._stream, 'read') and hasattr(self._stream, 'open'): + # # pathlib.Path() instance + # setattr(self, attr, CParser(self._stream)) + # else: + setattr(self, attr, CParser(self._stream)) + # self._parser = self._composer = self + # nprint('scanner', self.loader.scanner) + + return getattr(self, attr) + + @property + def composer(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + setattr(self, attr, self.Composer(loader=self)) + return getattr(self, attr) + + @property + def constructor(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + cnst = self.Constructor(preserve_quotes=self.preserve_quotes, loader=self) + cnst.allow_duplicate_keys = self.allow_duplicate_keys + setattr(self, attr, cnst) + return getattr(self, attr) + + @property + def resolver(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + setattr(self, attr, self.Resolver(version=self.version, loader=self)) + return getattr(self, attr) + + @property + def emitter(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + if self.Emitter is not CEmitter: + _emitter = self.Emitter( + None, + canonical=self.canonical, + indent=self.old_indent, + width=self.width, + allow_unicode=self.allow_unicode, + line_break=self.line_break, + prefix_colon=self.prefix_colon, + brace_single_entry_mapping_in_flow_sequence=self.brace_single_entry_mapping_in_flow_sequence, # NOQA + dumper=self, + ) + setattr(self, attr, _emitter) + if self.map_indent is not None: + _emitter.best_map_indent = self.map_indent + if 
self.sequence_indent is not None: + _emitter.best_sequence_indent = self.sequence_indent + if self.sequence_dash_offset is not None: + _emitter.sequence_dash_offset = self.sequence_dash_offset + # _emitter.block_seq_indent = self.sequence_dash_offset + if self.compact_seq_seq is not None: + _emitter.compact_seq_seq = self.compact_seq_seq + if self.compact_seq_map is not None: + _emitter.compact_seq_map = self.compact_seq_map + else: + if getattr(self, '_stream', None) is None: + # wait for the stream + return None + return None + return getattr(self, attr) + + @property + def serializer(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + setattr( + self, + attr, + self.Serializer( + encoding=self.encoding, + explicit_start=self.explicit_start, + explicit_end=self.explicit_end, + version=self.version, + tags=self.tags, + dumper=self, + ), + ) + return getattr(self, attr) + + @property + def representer(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + repres = self.Representer( + default_style=self.default_style, + default_flow_style=self.default_flow_style, + dumper=self, + ) + if self.sort_base_mapping_type_on_output is not None: + repres.sort_base_mapping_type_on_output = self.sort_base_mapping_type_on_output + setattr(self, attr, repres) + return getattr(self, attr) + + def scan(self, stream): + # type: (StreamTextType) -> Any + """ + Scan a YAML stream and produce scanning tokens. + """ + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('rb') as fp: + return self.scan(fp) + _, parser = self.get_constructor_parser(stream) + try: + while self.scanner.check_token(): + yield self.scanner.get_token() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def parse(self, stream): + # type: (StreamTextType) -> Any + """ + Parse a YAML stream and produce parsing events. + """ + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('rb') as fp: + return self.parse(fp) + _, parser = self.get_constructor_parser(stream) + try: + while parser.check_event(): + yield parser.get_event() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def compose(self, stream): + # type: (Union[Path, StreamTextType]) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('rb') as fp: + return self.compose(fp) + constructor, parser = self.get_constructor_parser(stream) + try: + return constructor.composer.get_single_node() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def compose_all(self, stream): + # type: (Union[Path, StreamTextType]) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. 
+ """ + constructor, parser = self.get_constructor_parser(stream) + try: + while constructor.composer.check_node(): + yield constructor.composer.get_node() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + # separate output resolver? + + # def load(self, stream=None): + # if self._context_manager: + # if not self._input: + # raise TypeError("Missing input stream while dumping from context manager") + # for data in self._context_manager.load(): + # yield data + # return + # if stream is None: + # raise TypeError("Need a stream argument when not loading from context manager") + # return self.load_one(stream) + + def load(self, stream): + # type: (Union[Path, StreamTextType]) -> Any + """ + at this point you either have the non-pure Parser (which has its own reader and + scanner) or you have the pure Parser. + If the pure Parser is set, then set the Reader and Scanner, if not already set. + If either the Scanner or Reader are set, you cannot use the non-pure Parser, + so reset it to the pure parser and set the Reader resp. Scanner if necessary + """ + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('rb') as fp: + return self.load(fp) + constructor, parser = self.get_constructor_parser(stream) + try: + return constructor.get_single_data() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def load_all(self, stream): # *, skip=None): + # type: (Union[Path, StreamTextType]) -> Any + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('r') as fp: + for d in self.load_all(fp): + yield d + return + # if skip is None: + # skip = [] + # elif isinstance(skip, int): + # skip = [skip] + constructor, parser = self.get_constructor_parser(stream) + try: + while constructor.check_data(): + yield constructor.get_data() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def get_constructor_parser(self, stream): + # type: (StreamTextType) -> Any + """ + the old cyaml needs special setup, and therefore the stream + """ + if self.Parser is not CParser: + if self.Reader is None: + self.Reader = ruamel.yaml.reader.Reader + if self.Scanner is None: + self.Scanner = ruamel.yaml.scanner.Scanner + self.reader.stream = stream + else: + if self.Reader is not None: + if self.Scanner is None: + self.Scanner = ruamel.yaml.scanner.Scanner + self.Parser = ruamel.yaml.parser.Parser + self.reader.stream = stream + elif self.Scanner is not None: + if self.Reader is None: + self.Reader = ruamel.yaml.reader.Reader + self.Parser = ruamel.yaml.parser.Parser + self.reader.stream = stream + else: + # combined C level reader>scanner>parser + # does some calls to the resolver, e.g. 
BaseResolver.descend_resolver + # if you just initialise the CParser, to much of resolver.py + # is actually used + rslvr = self.Resolver + # if rslvr is ruamel.yaml.resolver.VersionedResolver: + # rslvr = ruamel.yaml.resolver.Resolver + + class XLoader(self.Parser, self.Constructor, rslvr): # type: ignore + def __init__(selfx, stream, version=self.version, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None # NOQA + CParser.__init__(selfx, stream) + selfx._parser = selfx._composer = selfx + self.Constructor.__init__(selfx, loader=selfx) + selfx.allow_duplicate_keys = self.allow_duplicate_keys + rslvr.__init__(selfx, version=version, loadumper=selfx) + + self._stream = stream + loader = XLoader(stream) + return loader, loader + return self.constructor, self.parser + + def emit(self, events, stream): + # type: (Any, Any) -> None + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + _, _, emitter = self.get_serializer_representer_emitter(stream, None) + try: + for event in events: + emitter.emit(event) + finally: + try: + emitter.dispose() + except AttributeError: + raise + + def serialize(self, node, stream): + # type: (Any, Optional[StreamType]) -> Any + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + self.serialize_all([node], stream) + + def serialize_all(self, nodes, stream): + # type: (Any, Optional[StreamType]) -> Any + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. + """ + serializer, _, emitter = self.get_serializer_representer_emitter(stream, None) + try: + serializer.open() + for node in nodes: + serializer.serialize(node) + serializer.close() + finally: + try: + emitter.dispose() + except AttributeError: + raise + + def dump(self, data, stream=None, *, transform=None): + # type: (Any, Union[Path, StreamType], Any, Any) -> Any + if self._context_manager: + if not self._output: + raise TypeError('Missing output stream while dumping from context manager') + if transform is not None: + raise TypeError( + '{}.dump() in the context manager cannot have transform keyword ' + ''.format(self.__class__.__name__) + ) + self._context_manager.dump(data) + else: # old style + if stream is None: + raise TypeError('Need a stream argument when not dumping from context manager') + return self.dump_all([data], stream, transform=transform) + + def dump_all(self, documents, stream, *, transform=None): + # type: (Any, Union[Path, StreamType], Any) -> Any + if self._context_manager: + raise NotImplementedError + self._output = stream + self._context_manager = YAMLContextManager(self, transform=transform) + for data in documents: + self._context_manager.dump(data) + self._context_manager.teardown_output() + self._output = None + self._context_manager = None + + def Xdump_all(self, documents, stream, *, transform=None): + # type: (Any, Any, Any) -> Any + """ + Serialize a sequence of Python objects into a YAML stream. + """ + if not hasattr(stream, 'write') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('w') as fp: + return self.dump_all(documents, fp, transform=transform) + # The stream should have the methods `write` and possibly `flush`. 
+ if self.top_level_colon_align is True: + tlca = max([len(str(x)) for x in documents[0]]) # type: Any + else: + tlca = self.top_level_colon_align + if transform is not None: + fstream = stream + if self.encoding is None: + stream = StringIO() + else: + stream = BytesIO() + serializer, representer, emitter = self.get_serializer_representer_emitter( + stream, tlca + ) + try: + self.serializer.open() + for data in documents: + try: + self.representer.represent(data) + except AttributeError: + # nprint(dir(dumper._representer)) + raise + self.serializer.close() + finally: + try: + self.emitter.dispose() + except AttributeError: + raise + # self.dumper.dispose() # cyaml + delattr(self, '_serializer') + delattr(self, '_emitter') + if transform: + val = stream.getvalue() + if self.encoding: + val = val.decode(self.encoding) + if fstream is None: + transform(val) + else: + fstream.write(transform(val)) + return None + + def get_serializer_representer_emitter(self, stream, tlca): + # type: (StreamType, Any) -> Any + # we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler + if self.Emitter is not CEmitter: + if self.Serializer is None: + self.Serializer = ruamel.yaml.serializer.Serializer + self.emitter.stream = stream + self.emitter.top_level_colon_align = tlca + if self.scalar_after_indicator is not None: + self.emitter.scalar_after_indicator = self.scalar_after_indicator + return self.serializer, self.representer, self.emitter + if self.Serializer is not None: + # cannot set serializer with CEmitter + self.Emitter = ruamel.yaml.emitter.Emitter + self.emitter.stream = stream + self.emitter.top_level_colon_align = tlca + if self.scalar_after_indicator is not None: + self.emitter.scalar_after_indicator = self.scalar_after_indicator + return self.serializer, self.representer, self.emitter + # C routines + + rslvr = ( + ruamel.yaml.resolver.BaseResolver + if 'base' in self.typ + else ruamel.yaml.resolver.Resolver + ) + + class XDumper(CEmitter, self.Representer, rslvr): # type: ignore + def __init__( + selfx, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + CEmitter.__init__( + selfx, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + selfx._emitter = selfx._serializer = selfx._representer = selfx + self.Representer.__init__( + selfx, default_style=default_style, default_flow_style=default_flow_style + ) + rslvr.__init__(selfx) + + self._stream = stream + dumper = XDumper( + stream, + default_style=self.default_style, + default_flow_style=self.default_flow_style, + canonical=self.canonical, + indent=self.old_indent, + width=self.width, + allow_unicode=self.allow_unicode, + line_break=self.line_break, + explicit_start=self.explicit_start, + explicit_end=self.explicit_end, + version=self.version, + tags=self.tags, + ) + self._emitter = self._serializer = dumper + return dumper, dumper, dumper + + # basic types + def map(self, **kw): + # type: (Any) 
-> Any + if 'rt' in self.typ: + return CommentedMap(**kw) + else: + return dict(**kw) + + def seq(self, *args): + # type: (Any) -> Any + if 'rt' in self.typ: + return CommentedSeq(*args) + else: + return list(*args) + + # helpers + def official_plug_ins(self): + # type: () -> Any + """search for list of subdirs that are plug-ins, if __file__ is not available, e.g. + single file installers that are not properly emulating a file-system (issue 324) + no plug-ins will be found. If any are packaged, you know which file that are + and you can explicitly provide it during instantiation: + yaml = ruamel.yaml.YAML(plug_ins=['ruamel/yaml/jinja2/__plug_in__']) + """ + try: + bd = os.path.dirname(__file__) + except NameError: + return [] + gpbd = os.path.dirname(os.path.dirname(bd)) + res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + '/*/__plug_in__.py')] + return res + + def register_class(self, cls): + # type:(Any) -> Any + """ + register a class for dumping loading + - if it has attribute yaml_tag use that to register, else use class name + - if it has methods to_yaml/from_yaml use those to dump/load else dump attributes + as mapping + """ + tag = getattr(cls, 'yaml_tag', '!' + cls.__name__) + try: + self.representer.add_representer(cls, cls.to_yaml) + except AttributeError: + + def t_y(representer, data): + # type: (Any, Any) -> Any + return representer.represent_yaml_object( + tag, data, cls, flow_style=representer.default_flow_style + ) + + self.representer.add_representer(cls, t_y) + try: + self.constructor.add_constructor(tag, cls.from_yaml) + except AttributeError: + + def f_y(constructor, node): + # type: (Any, Any) -> Any + return constructor.construct_yaml_object(node, cls) + + self.constructor.add_constructor(tag, f_y) + return cls + + # ### context manager + + def __enter__(self): + # type: () -> Any + self._context_manager = YAMLContextManager(self) + return self + + def __exit__(self, typ, value, traceback): + # type: (Any, Any, Any) -> None + if typ: + nprint('typ', typ) + self._context_manager.teardown_output() + # self._context_manager.teardown_input() + self._context_manager = None + + # ### backwards compatibility + def _indent(self, mapping=None, sequence=None, offset=None): + # type: (Any, Any, Any) -> None + if mapping is not None: + self.map_indent = mapping + if sequence is not None: + self.sequence_indent = sequence + if offset is not None: + self.sequence_dash_offset = offset + + @property + def indent(self): + # type: () -> Any + return self._indent + + @indent.setter + def indent(self, val): + # type: (Any) -> None + self.old_indent = val + + @property + def block_seq_indent(self): + # type: () -> Any + return self.sequence_dash_offset + + @block_seq_indent.setter + def block_seq_indent(self, val): + # type: (Any) -> None + self.sequence_dash_offset = val + + def compact(self, seq_seq=None, seq_map=None): + # type: (Any, Any) -> None + self.compact_seq_seq = seq_seq + self.compact_seq_map = seq_map + + +class YAMLContextManager: + def __init__(self, yaml, transform=None): + # type: (Any, Any) -> None # used to be: (Any, Optional[Callable]) -> None + self._yaml = yaml + self._output_inited = False + self._output_path = None + self._output = self._yaml._output + self._transform = transform + + # self._input_inited = False + # self._input = input + # self._input_path = None + # self._transform = yaml.transform + # self._fstream = None + + if not hasattr(self._output, 'write') and hasattr(self._output, 'open'): + # pathlib.Path() instance, open with the same mode + 
self._output_path = self._output + self._output = self._output_path.open('w') + + # if not hasattr(self._stream, 'write') and hasattr(stream, 'open'): + # if not hasattr(self._input, 'read') and hasattr(self._input, 'open'): + # # pathlib.Path() instance, open with the same mode + # self._input_path = self._input + # self._input = self._input_path.open('r') + + if self._transform is not None: + self._fstream = self._output + if self._yaml.encoding is None: + self._output = StringIO() + else: + self._output = BytesIO() + + def teardown_output(self): + # type: () -> None + if self._output_inited: + self._yaml.serializer.close() + else: + return + try: + self._yaml.emitter.dispose() + except AttributeError: + raise + # self.dumper.dispose() # cyaml + try: + delattr(self._yaml, '_serializer') + delattr(self._yaml, '_emitter') + except AttributeError: + raise + if self._transform: + val = self._output.getvalue() + if self._yaml.encoding: + val = val.decode(self._yaml.encoding) + if self._fstream is None: + self._transform(val) + else: + self._fstream.write(self._transform(val)) + self._fstream.flush() + self._output = self._fstream # maybe not necessary + if self._output_path is not None: + self._output.close() + + def init_output(self, first_data): + # type: (Any) -> None + if self._yaml.top_level_colon_align is True: + tlca = max([len(str(x)) for x in first_data]) # type: Any + else: + tlca = self._yaml.top_level_colon_align + self._yaml.get_serializer_representer_emitter(self._output, tlca) + self._yaml.serializer.open() + self._output_inited = True + + def dump(self, data): + # type: (Any) -> None + if not self._output_inited: + self.init_output(data) + try: + self._yaml.representer.represent(data) + except AttributeError: + # nprint(dir(dumper._representer)) + raise + + # def teardown_input(self): + # pass + # + # def init_input(self): + # # set the constructor and parser on YAML() instance + # self._yaml.get_constructor_parser(stream) + # + # def load(self): + # if not self._input_inited: + # self.init_input() + # try: + # while self._yaml.constructor.check_data(): + # yield self._yaml.constructor.get_data() + # finally: + # parser.dispose() + # try: + # self._reader.reset_reader() # type: ignore + # except AttributeError: + # pass + # try: + # self._scanner.reset_scanner() # type: ignore + # except AttributeError: + # pass + + +def yaml_object(yml): + # type: (Any) -> Any + """ decorator for classes that needs to dump/load objects + The tag for such objects is taken from the class attribute yaml_tag (or the + class name in lowercase in case unavailable) + If methods to_yaml and/or from_yaml are available, these are called for dumping resp. + loading, default routines (dumping a mapping of the attributes) used otherwise. + """ + + def yo_deco(cls): + # type: (Any) -> Any + tag = getattr(cls, 'yaml_tag', '!' 
+ cls.__name__) + try: + yml.representer.add_representer(cls, cls.to_yaml) + except AttributeError: + + def t_y(representer, data): + # type: (Any, Any) -> Any + return representer.represent_yaml_object( + tag, data, cls, flow_style=representer.default_flow_style + ) + + yml.representer.add_representer(cls, t_y) + try: + yml.constructor.add_constructor(tag, cls.from_yaml) + except AttributeError: + + def f_y(constructor, node): + # type: (Any, Any) -> Any + return constructor.construct_yaml_object(node, cls) + + yml.constructor.add_constructor(tag, f_y) + return cls + + return yo_deco + + +######################################################################################## +def warn_deprecation(fun, method, arg=''): + # type: (Any, Any, str) -> None + from pipenv.vendor.ruamel.yaml.compat import _F + + warnings.warn( + _F( + '\n{fun} will be removed, use\n\n yaml=YAML({arg})\n yaml.{method}(...)\n\ninstead', # NOQA + fun=fun, + method=method, + arg=arg, + ), + PendingDeprecationWarning, # this will show when testing with pytest/tox + stacklevel=3, + ) + + +######################################################################################## + + +def scan(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Scan a YAML stream and produce scanning tokens. + """ + warn_deprecation('scan', 'scan', arg="typ='unsafe', pure=True") + loader = Loader(stream) + try: + while loader.scanner.check_token(): + yield loader.scanner.get_token() + finally: + loader._parser.dispose() + + +def parse(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Parse a YAML stream and produce parsing events. + """ + warn_deprecation('parse', 'parse', arg="typ='unsafe', pure=True") + loader = Loader(stream) + try: + while loader._parser.check_event(): + yield loader._parser.get_event() + finally: + loader._parser.dispose() + + +def compose(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + warn_deprecation('compose', 'compose', arg="typ='unsafe', pure=True") + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + + +def compose_all(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + warn_deprecation('compose', 'compose', arg="typ='unsafe', pure=True") + loader = Loader(stream) + try: + while loader.check_node(): + yield loader._composer.get_node() + finally: + loader._parser.dispose() + + +def load(stream, Loader=None, version=None, preserve_quotes=None): + # type: (Any, Any, Any, Any) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + warn_deprecation('load', 'load', arg="typ='unsafe', pure=True") + if Loader is None: + warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2) + Loader = UnsafeLoader + loader = Loader(stream, version, preserve_quotes=preserve_quotes) # type: Any + try: + return loader._constructor.get_single_data() + finally: + loader._parser.dispose() + try: + loader._reader.reset_reader() + except AttributeError: + pass + try: + loader._scanner.reset_scanner() + except AttributeError: + pass + + +def load_all(stream, Loader=None, version=None, preserve_quotes=None): + # type: (Any, Any, Any, Any) -> Any # NOQA + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. 
+ """ + warn_deprecation('load_all', 'load_all', arg="typ='unsafe', pure=True") + if Loader is None: + warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2) + Loader = UnsafeLoader + loader = Loader(stream, version, preserve_quotes=preserve_quotes) # type: Any + try: + while loader._constructor.check_data(): + yield loader._constructor.get_data() + finally: + loader._parser.dispose() + try: + loader._reader.reset_reader() + except AttributeError: + pass + try: + loader._scanner.reset_scanner() + except AttributeError: + pass + + +def safe_load(stream, version=None): + # type: (StreamTextType, Optional[VersionType]) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + warn_deprecation('safe_load', 'load', arg="typ='safe', pure=True") + return load(stream, SafeLoader, version) + + +def safe_load_all(stream, version=None): + # type: (StreamTextType, Optional[VersionType]) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + warn_deprecation('safe_load_all', 'load_all', arg="typ='safe', pure=True") + return load_all(stream, SafeLoader, version) + + +def round_trip_load(stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + warn_deprecation('round_trip_load_all', 'load') + return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes) + + +def round_trip_load_all(stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + warn_deprecation('round_trip_load_all', 'load_all') + return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes) + + +def emit( + events, + stream=None, + Dumper=Dumper, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, +): + # type: (Any, Optional[StreamType], Any, Optional[bool], Union[int, None], Optional[int], Optional[bool], Any) -> Any # NOQA + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + warn_deprecation('emit', 'emit', arg="typ='safe', pure=True") + getvalue = None + if stream is None: + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper( + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + ) + try: + for event in events: + dumper.emit(event) + finally: + try: + dumper._emitter.dispose() + except AttributeError: + raise + dumper.dispose() # cyaml + if getvalue is not None: + return getvalue() + + +enc = None + + +def serialize_all( + nodes, + stream=None, + Dumper=Dumper, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, +): + # type: (Any, Optional[StreamType], Any, Any, Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any) -> Any # NOQA + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + warn_deprecation('serialize_all', 'serialize_all', arg="typ='safe', pure=True") + getvalue = None + if stream is None: + if encoding is None: + stream = StringIO() + else: + stream = BytesIO() + getvalue = stream.getvalue + dumper = Dumper( + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + version=version, + tags=tags, + explicit_start=explicit_start, + explicit_end=explicit_end, + ) + try: + dumper._serializer.open() + for node in nodes: + dumper.serialize(node) + dumper._serializer.close() + finally: + try: + dumper._emitter.dispose() + except AttributeError: + raise + dumper.dispose() # cyaml + if getvalue is not None: + return getvalue() + + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + # type: (Any, Optional[StreamType], Any, Any) -> Any + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + warn_deprecation('serialize', 'serialize', arg="typ='safe', pure=True") + return serialize_all([node], stream, Dumper=Dumper, **kwds) + + +def dump_all( + documents, + stream=None, + Dumper=Dumper, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, +): + # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> Any # NOQA + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + warn_deprecation('dump_all', 'dump_all', arg="typ='unsafe', pure=True") + getvalue = None + if top_level_colon_align is True: + top_level_colon_align = max([len(str(x)) for x in documents[0]]) + if stream is None: + if encoding is None: + stream = StringIO() + else: + stream = BytesIO() + getvalue = stream.getvalue + dumper = Dumper( + stream, + default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + ) + try: + dumper._serializer.open() + for data in documents: + try: + dumper._representer.represent(data) + except AttributeError: + # nprint(dir(dumper._representer)) + raise + dumper._serializer.close() + finally: + try: + dumper._emitter.dispose() + except AttributeError: + raise + dumper.dispose() # cyaml + if getvalue is not None: + return getvalue() + return None + + +def dump( + data, + stream=None, + Dumper=Dumper, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, +): + # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> Optional[Any] # NOQA + """ + Serialize a Python object into a YAML stream. 
+ If stream is None, return the produced string instead. + + default_style ∈ None, '', '"', "'", '|', '>' + + """ + warn_deprecation('dump', 'dump', arg="typ='unsafe', pure=True") + return dump_all( + [data], + stream, + Dumper=Dumper, + default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + block_seq_indent=block_seq_indent, + ) + + +def safe_dump_all(documents, stream=None, **kwds): + # type: (Any, Optional[StreamType], Any) -> Optional[Any] + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + warn_deprecation('safe_dump_all', 'dump_all', arg="typ='safe', pure=True") + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + + +def safe_dump(data, stream=None, **kwds): + # type: (Any, Optional[StreamType], Any) -> Optional[Any] + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + warn_deprecation('safe_dump', 'dump', arg="typ='safe', pure=True") + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + + +def round_trip_dump( + data, + stream=None, + Dumper=RoundTripDumper, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, +): + # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any, Any, Any) -> Optional[Any] # NOQA + allow_unicode = True if allow_unicode is None else allow_unicode + warn_deprecation('round_trip_dump', 'dump') + return dump_all( + [data], + stream, + Dumper=Dumper, + default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + ) + + +# Loader/Dumper are no longer composites, to get to the associated +# Resolver()/Representer(), etc., you need to instantiate the class + + +def add_implicit_resolver( + tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver +): + # type: (Any, Any, Any, Any, Any, Any) -> None + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. 
+    """
+    if Loader is None and Dumper is None:
+        resolver.add_implicit_resolver(tag, regexp, first)
+        return
+    if Loader:
+        if hasattr(Loader, 'add_implicit_resolver'):
+            Loader.add_implicit_resolver(tag, regexp, first)
+        elif issubclass(
+            Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
+        ):
+            Resolver.add_implicit_resolver(tag, regexp, first)
+        else:
+            raise NotImplementedError
+    if Dumper:
+        if hasattr(Dumper, 'add_implicit_resolver'):
+            Dumper.add_implicit_resolver(tag, regexp, first)
+        elif issubclass(
+            Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
+        ):
+            Resolver.add_implicit_resolver(tag, regexp, first)
+        else:
+            raise NotImplementedError
+
+
+# this code currently not tested
+def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver):
+    # type: (Any, Any, Any, Any, Any, Any) -> None
+    """
+    Add a path based resolver for the given tag.
+    A path is a list of keys that forms a path
+    to a node in the representation tree.
+    Keys can be string values, integers, or None.
+    """
+    if Loader is None and Dumper is None:
+        resolver.add_path_resolver(tag, path, kind)
+        return
+    if Loader:
+        if hasattr(Loader, 'add_path_resolver'):
+            Loader.add_path_resolver(tag, path, kind)
+        elif issubclass(
+            Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
+        ):
+            Resolver.add_path_resolver(tag, path, kind)
+        else:
+            raise NotImplementedError
+    if Dumper:
+        if hasattr(Dumper, 'add_path_resolver'):
+            Dumper.add_path_resolver(tag, path, kind)
+        elif issubclass(
+            Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
+        ):
+            Resolver.add_path_resolver(tag, path, kind)
+        else:
+            raise NotImplementedError
+
+
+def add_constructor(tag, object_constructor, Loader=None, constructor=Constructor):
+    # type: (Any, Any, Any, Any) -> None
+    """
+    Add an object constructor for the given tag.
+    object_constructor is a function that accepts a Loader instance
+    and a node object and produces the corresponding Python object.
+    """
+    if Loader is None:
+        constructor.add_constructor(tag, object_constructor)
+    else:
+        if hasattr(Loader, 'add_constructor'):
+            Loader.add_constructor(tag, object_constructor)
+            return
+        if issubclass(Loader, BaseLoader):
+            BaseConstructor.add_constructor(tag, object_constructor)
+        elif issubclass(Loader, SafeLoader):
+            SafeConstructor.add_constructor(tag, object_constructor)
+        elif issubclass(Loader, ruamel.yaml.loader.Loader):
+            Constructor.add_constructor(tag, object_constructor)
+        elif issubclass(Loader, RoundTripLoader):
+            RoundTripConstructor.add_constructor(tag, object_constructor)
+        else:
+            raise NotImplementedError
+
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=None, constructor=Constructor):
+    # type: (Any, Any, Any, Any) -> None
+    """
+    Add a multi-constructor for the given tag prefix.
+    Multi-constructor is called for a node if its tag starts with tag_prefix.
+    Multi-constructor accepts a Loader instance, a tag suffix,
+    and a node object and produces the corresponding Python object.
+    """
+    if Loader is None:
+        constructor.add_multi_constructor(tag_prefix, multi_constructor)
+    else:
+        if False and hasattr(Loader, 'add_multi_constructor'):
+            Loader.add_multi_constructor(tag_prefix, multi_constructor)
+            return
+        if issubclass(Loader, BaseLoader):
+            BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+        elif issubclass(Loader, SafeLoader):
+            SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+        elif issubclass(Loader, ruamel.yaml.loader.Loader):
+            Constructor.add_multi_constructor(tag_prefix, multi_constructor)
+        elif issubclass(Loader, RoundTripLoader):
+            RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+        else:
+            raise NotImplementedError
+
+
+def add_representer(data_type, object_representer, Dumper=None, representer=Representer):
+    # type: (Any, Any, Any, Any) -> None
+    """
+    Add a representer for the given type.
+    object_representer is a function accepting a Dumper instance
+    and an instance of the given data type
+    and producing the corresponding representation node.
+    """
+    if Dumper is None:
+        representer.add_representer(data_type, object_representer)
+    else:
+        if hasattr(Dumper, 'add_representer'):
+            Dumper.add_representer(data_type, object_representer)
+            return
+        if issubclass(Dumper, BaseDumper):
+            BaseRepresenter.add_representer(data_type, object_representer)
+        elif issubclass(Dumper, SafeDumper):
+            SafeRepresenter.add_representer(data_type, object_representer)
+        elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
+            Representer.add_representer(data_type, object_representer)
+        elif issubclass(Dumper, RoundTripDumper):
+            RoundTripRepresenter.add_representer(data_type, object_representer)
+        else:
+            raise NotImplementedError
+
+
+# this code currently not tested
+def add_multi_representer(data_type, multi_representer, Dumper=None, representer=Representer):
+    # type: (Any, Any, Any, Any) -> None
+    """
+    Add a representer for the given type.
+    multi_representer is a function accepting a Dumper instance
+    and an instance of the given data type or subtype
+    and producing the corresponding representation node.
+    """
+    if Dumper is None:
+        representer.add_multi_representer(data_type, multi_representer)
+    else:
+        if hasattr(Dumper, 'add_multi_representer'):
+            Dumper.add_multi_representer(data_type, multi_representer)
+            return
+        if issubclass(Dumper, BaseDumper):
+            BaseRepresenter.add_multi_representer(data_type, multi_representer)
+        elif issubclass(Dumper, SafeDumper):
+            SafeRepresenter.add_multi_representer(data_type, multi_representer)
+        elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
+            Representer.add_multi_representer(data_type, multi_representer)
+        elif issubclass(Dumper, RoundTripDumper):
+            RoundTripRepresenter.add_multi_representer(data_type, multi_representer)
+        else:
+            raise NotImplementedError
+
+
+class YAMLObjectMetaclass(type):
+    """
+    The metaclass for YAMLObject.
+    """
+
+    def __init__(cls, name, bases, kwds):
+        # type: (Any, Any, Any) -> None
+        super().__init__(name, bases, kwds)
+        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+            cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml)  # type: ignore
+            cls.yaml_representer.add_representer(cls, cls.to_yaml)  # type: ignore
+
+
+class YAMLObject(with_metaclass(YAMLObjectMetaclass)):  # type: ignore
+    """
+    An object that can dump itself to a YAML stream
+    and load itself from a YAML stream.
+ """ + + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_constructor = Constructor + yaml_representer = Representer + + yaml_tag = None # type: Any + yaml_flow_style = None # type: Any + + @classmethod + def from_yaml(cls, constructor, node): + # type: (Any, Any) -> Any + """ + Convert a representation node to a Python object. + """ + return constructor.construct_yaml_object(node, cls) + + @classmethod + def to_yaml(cls, representer, data): + # type: (Any, Any) -> Any + """ + Convert a Python object to a representation node. + """ + return representer.represent_yaml_object( + cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style + ) diff --git a/pipenv/vendor/ruamel/yaml/nodes.py b/pipenv/vendor/ruamel/yaml/nodes.py new file mode 100644 index 0000000000..89550b2ada --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/nodes.py @@ -0,0 +1,135 @@ +# coding: utf-8 + +import sys + +from pipenv.vendor.ruamel.yaml.compat import _F + +if False: # MYPY + from typing import Dict, Any, Text # NOQA + + +class Node: + __slots__ = 'tag', 'value', 'start_mark', 'end_mark', 'comment', 'anchor' + + def __init__(self, tag, value, start_mark, end_mark, comment=None, anchor=None): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.comment = comment + self.anchor = anchor + + def __repr__(self): + # type: () -> Any + value = self.value + # if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = f'<{len(value)} items>' + # else: + # if len(value) > 75: + # value = repr(value[:70]+' ... ') + # else: + # value = repr(value) + value = repr(value) + return _F( + '{class_name!s}(tag={self_tag!r}, value={value!s})', + class_name=self.__class__.__name__, + self_tag=self.tag, + value=value, + ) + + def dump(self, indent=0): + # type: (int) -> None + if isinstance(self.value, str): + sys.stdout.write( + '{}{}(tag={!r}, value={!r})\n'.format( + ' ' * indent, self.__class__.__name__, self.tag, self.value + ) + ) + if self.comment: + sys.stdout.write(' {}comment: {})\n'.format(' ' * indent, self.comment)) + return + sys.stdout.write( + '{}{}(tag={!r})\n'.format(' ' * indent, self.__class__.__name__, self.tag) + ) + if self.comment: + sys.stdout.write(' {}comment: {})\n'.format(' ' * indent, self.comment)) + for v in self.value: + if isinstance(v, tuple): + for v1 in v: + v1.dump(indent + 1) + elif isinstance(v, Node): + v.dump(indent + 1) + else: + sys.stdout.write('Node value type? {}\n'.format(type(v))) + + +class ScalarNode(Node): + """ + styles: + ? -> set() ? 
key, no value + " -> double quoted + ' -> single quoted + | -> literal style + > -> folding style + """ + + __slots__ = ('style',) + id = 'scalar' + + def __init__( + self, tag, value, start_mark=None, end_mark=None, style=None, comment=None, anchor=None + ): + # type: (Any, Any, Any, Any, Any, Any, Any) -> None + Node.__init__(self, tag, value, start_mark, end_mark, comment=comment, anchor=anchor) + self.style = style + + +class CollectionNode(Node): + __slots__ = ('flow_style',) + + def __init__( + self, + tag, + value, + start_mark=None, + end_mark=None, + flow_style=None, + comment=None, + anchor=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any) -> None + Node.__init__(self, tag, value, start_mark, end_mark, comment=comment) + self.flow_style = flow_style + self.anchor = anchor + + +class SequenceNode(CollectionNode): + __slots__ = () + id = 'sequence' + + +class MappingNode(CollectionNode): + __slots__ = ('merge',) + id = 'mapping' + + def __init__( + self, + tag, + value, + start_mark=None, + end_mark=None, + flow_style=None, + comment=None, + anchor=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any) -> None + CollectionNode.__init__( + self, tag, value, start_mark, end_mark, flow_style, comment, anchor + ) + self.merge = None diff --git a/pipenv/vendor/ruamel/yaml/parser.py b/pipenv/vendor/ruamel/yaml/parser.py new file mode 100644 index 0000000000..f70d22853c --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/parser.py @@ -0,0 +1,884 @@ +# coding: utf-8 + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* +# STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | +# indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? +# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* +# BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
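+#
+# For example, the single document `- a` is matched as an implicit_document
+# whose block_node is a block_sequence; the scanner delivers the token stream
+# STREAM-START, BLOCK-SEQUENCE-START, BLOCK-ENTRY, SCALAR('a'), BLOCK-END,
+# STREAM-END, which the productions above consume with one token of
+# lookahead at a time.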
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START
+#               BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START
+#                  FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR
+#               BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START
+#               FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+#               FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+#               FLOW-MAPPING-START KEY }

+# need to have full path with import, as pkg_resources tries to load parser.py in __init__.py
+# only to not do anything with the package afterwards
+# and for Jython too
+
+
+from pipenv.vendor.ruamel.yaml.error import MarkedYAMLError
+from pipenv.vendor.ruamel.yaml.tokens import *  # NOQA
+from pipenv.vendor.ruamel.yaml.events import *  # NOQA
+from pipenv.vendor.ruamel.yaml.scanner import Scanner, RoundTripScanner, ScannerError  # NOQA
+from pipenv.vendor.ruamel.yaml.scanner import BlankLineComment
+from pipenv.vendor.ruamel.yaml.comments import C_PRE, C_POST, C_SPLIT_ON_FIRST_BLANK
+from pipenv.vendor.ruamel.yaml.compat import _F, nprint, nprintf  # NOQA
+
+if False:  # MYPY
+    from typing import Any, Dict, List, Optional  # NOQA
+
+__all__ = ['Parser', 'RoundTripParser', 'ParserError']
+
+
+def xprintf(*args, **kw):
+    # type: (Any, Any) -> Any
+    return nprintf(*args, **kw)
+
+
+class ParserError(MarkedYAMLError):
+    pass
+
+
+class Parser:
+    # Since writing a recursive-descent parser is a straightforward task, we
+    # do not give many comments here.
+
+    DEFAULT_TAGS = {'!': '!', '!!': 'tag:yaml.org,2002:'}
+
+    def __init__(self, loader):
+        # type: (Any) -> None
+        self.loader = loader
+        if self.loader is not None and getattr(self.loader, '_parser', None) is None:
+            self.loader._parser = self
+        self.reset_parser()
+
+    def reset_parser(self):
+        # type: () -> None
+        # Reset the state attributes (to clear self-references)
+        self.current_event = self.last_event = None
+        self.tag_handles = {}  # type: Dict[Any, Any]
+        self.states = []  # type: List[Any]
+        self.marks = []  # type: List[Any]
+        self.state = self.parse_stream_start  # type: Any
+
+    def dispose(self):
+        # type: () -> None
+        self.reset_parser()
+
+    @property
+    def scanner(self):
+        # type: () -> Any
+        if hasattr(self.loader, 'typ'):
+            return self.loader.scanner
+        return self.loader._scanner
+
+    @property
+    def resolver(self):
+        # type: () -> Any
+        if hasattr(self.loader, 'typ'):
+            return self.loader.resolver
+        return self.loader._resolver
+
+    def check_event(self, *choices):
+        # type: (Any) -> bool
+        # Check the type of the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        if self.current_event is not None:
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.current_event, choice):
+                    return True
+        return False
+
+    def peek_event(self):
+        # type: () -> Any
+        # Get the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        return self.current_event
+
+    def get_event(self):
+        # type: () -> Any
+        # Get the next event and proceed further.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        # assert self.current_event is not None
+        # if self.current_event.end_mark.line != self.peek_event().start_mark.line:
+        xprintf('get_event', repr(self.current_event), self.peek_event().start_mark.line)
+        self.last_event = value = self.current_event
+        self.current_event = None
+        return value
+
+    # stream ::= STREAM-START implicit_document? explicit_document*
+    #            STREAM-END
+    # implicit_document ::= block_node DOCUMENT-END*
+    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+    def parse_stream_start(self):
+        # type: () -> Any
+        # Parse the stream start.
+        token = self.scanner.get_token()
+        self.move_token_comment(token)
+        event = StreamStartEvent(token.start_mark, token.end_mark, encoding=token.encoding)
+
+        # Prepare the next state.
+        self.state = self.parse_implicit_document_start
+
+        return event
+
+    def parse_implicit_document_start(self):
+        # type: () -> Any
+        # Parse an implicit document.
+        if not self.scanner.check_token(DirectiveToken, DocumentStartToken, StreamEndToken):
+            self.tag_handles = self.DEFAULT_TAGS
+            token = self.scanner.peek_token()
+            start_mark = end_mark = token.start_mark
+            event = DocumentStartEvent(start_mark, end_mark, explicit=False)
+
+            # Prepare the next state.
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_block_node
+
+            return event
+
+        else:
+            return self.parse_document_start()
+
+    def parse_document_start(self):
+        # type: () -> Any
+        # Parse any extra document end indicators.
+        while self.scanner.check_token(DocumentEndToken):
+            self.scanner.get_token()
+        # Parse an explicit document.
+        if not self.scanner.check_token(StreamEndToken):
+            version, tags = self.process_directives()
+            if not self.scanner.check_token(DocumentStartToken):
+                raise ParserError(
+                    None,
+                    None,
+                    _F(
+                        "expected '<document start>', but found {pt!r}",
+                        pt=self.scanner.peek_token().id,
+                    ),
+                    self.scanner.peek_token().start_mark,
+                )
+            token = self.scanner.get_token()
+            start_mark = token.start_mark
+            end_mark = token.end_mark
+            # if self.loader is not None and \
+            #    end_mark.line != self.scanner.peek_token().start_mark.line:
+            #     self.loader.scalar_after_indicator = False
+            event = DocumentStartEvent(
+                start_mark, end_mark, explicit=True, version=version, tags=tags,
+                comment=token.comment
+            )  # type: Any
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_document_content
+        else:
+            # Parse the end of the stream.
+            token = self.scanner.get_token()
+            event = StreamEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+            assert not self.states
+            assert not self.marks
+            self.state = None
+        return event
+
+    def parse_document_end(self):
+        # type: () -> Any
+        # Parse the document end.
+ token = self.scanner.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.scanner.check_token(DocumentEndToken): + token = self.scanner.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, explicit=explicit) + + # Prepare the next state. + if self.resolver.processing_version == (1, 1): + self.state = self.parse_document_start + else: + self.state = self.parse_implicit_document_start + + return event + + def parse_document_content(self): + # type: () -> Any + if self.scanner.check_token( + DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken + ): + event = self.process_empty_scalar(self.scanner.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + # type: () -> Any + yaml_version = None + self.tag_handles = {} + while self.scanner.check_token(DirectiveToken): + token = self.scanner.get_token() + if token.name == 'YAML': + if yaml_version is not None: + raise ParserError( + None, None, 'found duplicate YAML directive', token.start_mark + ) + major, minor = token.value + if major != 1: + raise ParserError( + None, + None, + 'found incompatible YAML document (version 1.* is required)', + token.start_mark, + ) + yaml_version = token.value + elif token.name == 'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError( + None, + None, + _F('duplicate tag handle {handle!r}', handle=handle), + token.start_mark, + ) + self.tag_handles[handle] = prefix + if bool(self.tag_handles): + value = yaml_version, self.tag_handles.copy() # type: Any + else: + value = yaml_version, None + if self.loader is not None and hasattr(self.loader, 'tags'): + self.loader.version = yaml_version + if self.loader.tags is None: + self.loader.tags = {} + for k in self.tag_handles: + self.loader.tags[k] = self.tag_handles[k] + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + # type: () -> Any + return self.parse_node(block=True) + + def parse_flow_node(self): + # type: () -> Any + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + # type: () -> Any + return self.parse_node(block=True, indentless_sequence=True) + + def transform_tag(self, handle, suffix): + # type: (Any, Any) -> Any + return self.tag_handles[handle] + suffix + + def parse_node(self, block=False, indentless_sequence=False): + # type: (bool, bool) -> Any + if self.scanner.check_token(AliasToken): + token = self.scanner.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) # type: Any + self.state = self.states.pop() + return event + + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.scanner.check_token(AnchorToken): + token = self.scanner.get_token() + self.move_token_comment(token) + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.scanner.check_token(TagToken): + token = self.scanner.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.scanner.check_token(TagToken): + token = self.scanner.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.scanner.check_token(AnchorToken): + token = self.scanner.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError( + 'while parsing a node', + start_mark, + _F('found undefined tag handle {handle!r}', handle=handle), + tag_mark, + ) + tag = self.transform_tag(handle, suffix) + else: + tag = suffix + # if tag == '!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' + # and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.scanner.peek_token().start_mark + event = None + implicit = tag is None or tag == '!' 
+ if indentless_sequence and self.scanner.check_token(BlockEntryToken): + comment = None + pt = self.scanner.peek_token() + if self.loader and self.loader.comment_handling is None: + if pt.comment and pt.comment[0]: + comment = [pt.comment[0], []] + pt.comment[0] = None + elif self.loader: + if pt.comment: + comment = pt.comment + end_mark = self.scanner.peek_token().end_mark + event = SequenceStartEvent( + anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment + ) + self.state = self.parse_indentless_sequence_entry + return event + + if self.scanner.check_token(ScalarToken): + token = self.scanner.get_token() + # self.scanner.peek_token_same_line_comment(token) + end_mark = token.end_mark + if (token.plain and tag is None) or tag == '!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + # nprint('se', token.value, token.comment) + event = ScalarEvent( + anchor, + tag, + implicit, + token.value, + start_mark, + end_mark, + style=token.style, + comment=token.comment, + ) + self.state = self.states.pop() + elif self.scanner.check_token(FlowSequenceStartToken): + pt = self.scanner.peek_token() + end_mark = pt.end_mark + event = SequenceStartEvent( + anchor, + tag, + implicit, + start_mark, + end_mark, + flow_style=True, + comment=pt.comment, + ) + self.state = self.parse_flow_sequence_first_entry + elif self.scanner.check_token(FlowMappingStartToken): + pt = self.scanner.peek_token() + end_mark = pt.end_mark + event = MappingStartEvent( + anchor, + tag, + implicit, + start_mark, + end_mark, + flow_style=True, + comment=pt.comment, + ) + self.state = self.parse_flow_mapping_first_key + elif block and self.scanner.check_token(BlockSequenceStartToken): + end_mark = self.scanner.peek_token().start_mark + # should inserting the comment be dependent on the + # indentation? + pt = self.scanner.peek_token() + comment = pt.comment + # nprint('pt0', type(pt)) + if comment is None or comment[1] is None: + comment = pt.split_old_comment() + # nprint('pt1', comment) + event = SequenceStartEvent( + anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment + ) + self.state = self.parse_block_sequence_first_entry + elif block and self.scanner.check_token(BlockMappingStartToken): + end_mark = self.scanner.peek_token().start_mark + comment = self.scanner.peek_token().comment + event = MappingStartEvent( + anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment + ) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+            event = ScalarEvent(anchor, tag, (implicit, False), "", start_mark, end_mark)
+            self.state = self.states.pop()
+        else:
+            if block:
+                node = 'block'
+            else:
+                node = 'flow'
+            token = self.scanner.peek_token()
+            raise ParserError(
+                _F('while parsing a {node!s} node', node=node),
+                start_mark,
+                _F('expected the node content, but found {token_id!r}', token_id=token.id),
+                token.start_mark,
+            )
+        return event
+
+    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+    #                    BLOCK-END
+
+    def parse_block_sequence_first_entry(self):
+        # type: () -> Any
+        token = self.scanner.get_token()
+        # move any comment from start token
+        # self.move_token_comment(token)
+        self.marks.append(token.start_mark)
+        return self.parse_block_sequence_entry()
+
+    def parse_block_sequence_entry(self):
+        # type: () -> Any
+        if self.scanner.check_token(BlockEntryToken):
+            token = self.scanner.get_token()
+            self.move_token_comment(token)
+            if not self.scanner.check_token(BlockEntryToken, BlockEndToken):
+                self.states.append(self.parse_block_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_block_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        if not self.scanner.check_token(BlockEndToken):
+            token = self.scanner.peek_token()
+            raise ParserError(
+                'while parsing a block collection',
+                self.marks[-1],
+                _F('expected <block end>, but found {token_id!r}', token_id=token.id),
+                token.start_mark,
+            )
+        token = self.scanner.get_token()  # BlockEndToken
+        event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+    # indentless_sequence?
+    # sequence:
+    # - entry
+    #  - nested
+
+    def parse_indentless_sequence_entry(self):
+        # type: () -> Any
+        if self.scanner.check_token(BlockEntryToken):
+            token = self.scanner.get_token()
+            self.move_token_comment(token)
+            if not self.scanner.check_token(
+                BlockEntryToken, KeyToken, ValueToken, BlockEndToken
+            ):
+                self.states.append(self.parse_indentless_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_indentless_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        token = self.scanner.peek_token()
+        c = None
+        if self.loader and self.loader.comment_handling is None:
+            c = token.comment
+            start_mark = token.start_mark
+        else:
+            start_mark = self.last_event.end_mark  # type: ignore
+            c = self.distribute_comment(token.comment, start_mark.line)  # type: ignore
+        event = SequenceEndEvent(start_mark, start_mark, comment=c)
+        self.state = self.states.pop()
+        return event
+
+    # block_mapping ::= BLOCK-MAPPING_START
+    #                   ((KEY block_node_or_indentless_sequence?)?
+    #                   (VALUE block_node_or_indentless_sequence?)?)*
+    #                   BLOCK-END
+
+    def parse_block_mapping_first_key(self):
+        # type: () -> Any
+        token = self.scanner.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_block_mapping_key()
+
+    def parse_block_mapping_key(self):
+        # type: () -> Any
+        if self.scanner.check_token(KeyToken):
+            token = self.scanner.get_token()
+            self.move_token_comment(token)
+            if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_value)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_value
+                return self.process_empty_scalar(token.end_mark)
+        if self.resolver.processing_version > (1, 1) and self.scanner.check_token(ValueToken):
+            self.state = self.parse_block_mapping_value
+            return self.process_empty_scalar(self.scanner.peek_token().start_mark)
+        if not self.scanner.check_token(BlockEndToken):
+            token = self.scanner.peek_token()
+            raise ParserError(
+                'while parsing a block mapping',
+                self.marks[-1],
+                _F('expected <block end>, but found {token_id!r}', token_id=token.id),
+                token.start_mark,
+            )
+        token = self.scanner.get_token()
+        self.move_token_comment(token)
+        event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_block_mapping_value(self):
+        # type: () -> Any
+        if self.scanner.check_token(ValueToken):
+            token = self.scanner.get_token()
+            # value token might have post comment move it to e.g. block
+            if self.scanner.check_token(ValueToken):
+                self.move_token_comment(token)
+            else:
+                if not self.scanner.check_token(KeyToken):
+                    self.move_token_comment(token, empty=True)
+                # else: empty value for this key cannot move token.comment
+            if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_key)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_key
+                comment = token.comment
+                if comment is None:
+                    token = self.scanner.peek_token()
+                    comment = token.comment
+                    if comment:
+                        token._comment = [None, comment[1]]
+                        comment = [comment[0], None]
+                return self.process_empty_scalar(token.end_mark, comment=comment)
+        else:
+            self.state = self.parse_block_mapping_key
+            token = self.scanner.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    # flow_sequence ::= FLOW-SEQUENCE-START
+    #                   (flow_sequence_entry FLOW-ENTRY)*
+    #                   flow_sequence_entry?
+    #                   FLOW-SEQUENCE-END
+    # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+    #
+    # Note that while production rules for both flow_sequence_entry and
+    # flow_mapping_entry are equal, their interpretations are different.
+    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
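+    # For example, the flow sequence `[a, b: c]` is parsed as a two-entry
+    # sequence whose second entry is the single-pair mapping {b: c}, via the
+    # KEY/VALUE branch in parse_flow_sequence_entry below.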
+ + def parse_flow_sequence_first_entry(self): + # type: () -> Any + token = self.scanner.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + # type: (bool) -> Any + if not self.scanner.check_token(FlowSequenceEndToken): + if not first: + if self.scanner.check_token(FlowEntryToken): + self.scanner.get_token() + else: + token = self.scanner.peek_token() + raise ParserError( + 'while parsing a flow sequence', + self.marks[-1], + _F("expected ',' or ']', but got {token_id!r}", token_id=token.id), + token.start_mark, + ) + + if self.scanner.check_token(KeyToken): + token = self.scanner.peek_token() + event = MappingStartEvent( + None, None, True, token.start_mark, token.end_mark, flow_style=True + ) # type: Any + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.scanner.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.scanner.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + # type: () -> Any + token = self.scanner.get_token() + if not self.scanner.check_token(ValueToken, FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + # type: () -> Any + if self.scanner.check_token(ValueToken): + token = self.scanner.get_token() + if not self.scanner.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.scanner.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_sequence_entry_mapping_end(self): + # type: () -> Any + self.state = self.parse_flow_sequence_entry + token = self.scanner.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
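+    # For example, in the flow mapping `{a: 1, b}` the entry `b` carries no
+    # VALUE token: the key is parsed as a plain flow node, and
+    # parse_flow_mapping_empty_value then supplies an empty scalar for the
+    # missing value.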
+
+    def parse_flow_mapping_first_key(self):
+        # type: () -> Any
+        token = self.scanner.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_flow_mapping_key(first=True)
+
+    def parse_flow_mapping_key(self, first=False):
+        # type: (Any) -> Any
+        if not self.scanner.check_token(FlowMappingEndToken):
+            if not first:
+                if self.scanner.check_token(FlowEntryToken):
+                    self.scanner.get_token()
+                else:
+                    token = self.scanner.peek_token()
+                    raise ParserError(
+                        'while parsing a flow mapping',
+                        self.marks[-1],
+                        _F("expected ',' or '}}', but got {token_id!r}", token_id=token.id),
+                        token.start_mark,
+                    )
+            if self.scanner.check_token(KeyToken):
+                token = self.scanner.get_token()
+                if not self.scanner.check_token(
+                    ValueToken, FlowEntryToken, FlowMappingEndToken
+                ):
+                    self.states.append(self.parse_flow_mapping_value)
+                    return self.parse_flow_node()
+                else:
+                    self.state = self.parse_flow_mapping_value
+                    return self.process_empty_scalar(token.end_mark)
+            elif self.resolver.processing_version > (1, 1) and self.scanner.check_token(
+                ValueToken
+            ):
+                self.state = self.parse_flow_mapping_value
+                return self.process_empty_scalar(self.scanner.peek_token().end_mark)
+            elif not self.scanner.check_token(FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_empty_value)
+                return self.parse_flow_node()
+        token = self.scanner.get_token()
+        event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_flow_mapping_value(self):
+        # type: () -> Any
+        if self.scanner.check_token(ValueToken):
+            token = self.scanner.get_token()
+            if not self.scanner.check_token(FlowEntryToken, FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_key)
+                return self.parse_flow_node()
+            else:
+                self.state = self.parse_flow_mapping_key
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_flow_mapping_key
+            token = self.scanner.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    def parse_flow_mapping_empty_value(self):
+        # type: () -> Any
+        self.state = self.parse_flow_mapping_key
+        return self.process_empty_scalar(self.scanner.peek_token().start_mark)
+
+    def process_empty_scalar(self, mark, comment=None):
+        # type: (Any, Any) -> Any
+        return ScalarEvent(None, None, (True, False), "", mark, mark, comment=comment)
+
+    def move_token_comment(self, token, nt=None, empty=False):
+        # type: (Any, Optional[Any], Optional[bool]) -> Any
+        pass
+
+
+class RoundTripParser(Parser):
+    """roundtrip is a safe loader, that wants to see the unmangled tag"""
+
+    def transform_tag(self, handle, suffix):
+        # type: (Any, Any) -> Any
+        # return self.tag_handles[handle]+suffix
+        if handle == '!!' and suffix in (
+            'null',
+            'bool',
+            'int',
+            'float',
+            'binary',
+            'timestamp',
+            'omap',
+            'pairs',
+            'set',
+            'str',
+            'seq',
+            'map',
+        ):
+            return Parser.transform_tag(self, handle, suffix)
+        return handle + suffix
+
+    def move_token_comment(self, token, nt=None, empty=False):
+        # type: (Any, Optional[Any], Optional[bool]) -> Any
+        token.move_old_comment(self.scanner.peek_token() if nt is None else nt, empty=empty)
+
+
+class RoundTripParserSC(RoundTripParser):
+    """roundtrip is a safe loader, that wants to see the unmangled tag"""
+
+    # some of the differences are based on the superclass testing
+    # if self.loader.comment_handling is not None
+
+    def move_token_comment(self, token, nt=None, empty=False):
+        # type: (Any, Optional[Any], Optional[bool]) -> None
+        token.move_new_comment(self.scanner.peek_token() if nt is None else nt, empty=empty)
+
+    def distribute_comment(self, comment, line):
+        # type: (Any, Any) -> Any
+        # ToDo, look at indentation of the comment to determine attachment
+        if comment is None:
+            return None
+        if not comment[0]:
+            return None
+        if comment[0][0] != line + 1:
+            nprintf('>>>dcxxx', comment, line)
+        assert comment[0][0] == line + 1
+        # if comment[0] - line > 1:
+        #     return
+        typ = self.loader.comment_handling & 0b11
+        # nprintf('>>>dca', comment, line, typ)
+        if typ == C_POST:
+            return None
+        if typ == C_PRE:
+            c = [None, None, comment[0]]
+            comment[0] = None
+            return c
+        # nprintf('>>>dcb', comment[0])
+        for _idx, cmntidx in enumerate(comment[0]):
+            # nprintf('>>>dcb', cmntidx)
+            if isinstance(self.scanner.comments[cmntidx], BlankLineComment):
+                break
+        else:
+            return None  # no space found
+        if _idx == 0:
+            return None  # first line was blank
+        # nprintf('>>>dcc', idx)
+        if typ == C_SPLIT_ON_FIRST_BLANK:
+            c = [None, None, comment[0][:_idx]]
+            comment[0] = comment[0][_idx:]
+            return c
+        raise NotImplementedError  # reserved
diff --git a/pipenv/vendor/ruamel/yaml/py.typed b/pipenv/vendor/ruamel/yaml/py.typed
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/pipenv/vendor/ruamel/yaml/reader.py b/pipenv/vendor/ruamel/yaml/reader.py
new file mode 100644
index 0000000000..d020e3903c
--- /dev/null
+++ b/pipenv/vendor/ruamel/yaml/reader.py
@@ -0,0 +1,302 @@
+# coding: utf-8
+
+# This module contains abstractions for the input stream. You don't have to
+# look further, there is no pretty code.
+#
+# We define two classes here.
+#
+#   Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+#   Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+#   reader.peek(length=1) - return the next `length` characters
+#   reader.forward(length=1) - move the current position to `length`
+#      characters.
+#   reader.index - the number of the current character.
+#   reader.line, reader.column - the line and the column of the current
+#      character.
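+#
+# A minimal usage sketch (illustrative, not part of the vendored source):
+#
+#     r = Reader('ab\nc')
+#     r.peek()          # 'a' -- does not advance
+#     r.forward(3)      # consume 'a', 'b' and the newline
+#     r.line, r.column  # (1, 0) -- now positioned at 'c'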
+ +import codecs + +from pipenv.vendor.ruamel.yaml.error import YAMLError, FileMark, StringMark, YAMLStreamError +from pipenv.vendor.ruamel.yaml.compat import _F # NOQA +from pipenv.vendor.ruamel.yaml.util import RegExp + +if False: # MYPY + from typing import Any, Dict, Optional, List, Union, Text, Tuple, Optional # NOQA +# from ruamel.yaml.compat import StreamTextType # NOQA + +__all__ = ['Reader', 'ReaderError'] + + +class ReaderError(YAMLError): + def __init__(self, name, position, character, encoding, reason): + # type: (Any, Any, Any, Any, Any) -> None + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + # type: () -> Any + if isinstance(self.character, bytes): + return _F( + "'{self_encoding!s}' codec can't decode byte #x{ord_self_character:02x}: " + '{self_reason!s}\n' + ' in "{self_name!s}", position {self_position:d}', + self_encoding=self.encoding, + ord_self_character=ord(self.character), + self_reason=self.reason, + self_name=self.name, + self_position=self.position, + ) + else: + return _F( + 'unacceptable character #x{self_character:04x}: {self_reason!s}\n' + ' in "{self_name!s}", position {self_position:d}', + self_character=self.character, + self_reason=self.reason, + self_name=self.name, + self_position=self.position, + ) + + +class Reader: + # Reader: + # - determines the data encoding and converts it to a unicode string, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `bytes` object, + # - a `str` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. + + def __init__(self, stream, loader=None): + # type: (Any, Any) -> None + self.loader = loader + if self.loader is not None and getattr(self.loader, '_reader', None) is None: + self.loader._reader = self + self.reset_reader() + self.stream = stream # type: Any # as .read is called + + def reset_reader(self): + # type: () -> None + self.name = None # type: Any + self.stream_pointer = 0 + self.eof = True + self.buffer = "" + self.pointer = 0 + self.raw_buffer = None # type: Any + self.raw_decode = None + self.encoding = None # type: Optional[Text] + self.index = 0 + self.line = 0 + self.column = 0 + + @property + def stream(self): + # type: () -> Any + try: + return self._stream + except AttributeError: + raise YAMLStreamError('input stream needs to specified') + + @stream.setter + def stream(self, val): + # type: (Any) -> None + if val is None: + return + self._stream = None + if isinstance(val, str): + self.name = '' + self.check_printable(val) + self.buffer = val + '\0' + elif isinstance(val, bytes): + self.name = '' + self.raw_buffer = val + self.determine_encoding() + else: + if not hasattr(val, 'read'): + raise YAMLStreamError('stream argument needs to have a read() method') + self._stream = val + self.name = getattr(self.stream, 'name', '') + self.eof = False + self.raw_buffer = None + self.determine_encoding() + + def peek(self, index=0): + # type: (int) -> Text + try: + return self.buffer[self.pointer + index] + except IndexError: + self.update(index + 1) + return self.buffer[self.pointer + index] + + def prefix(self, length=1): + # type: (int) -> Any + if self.pointer + length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer : self.pointer + length] + + def forward_1_1(self, length=1): + # type: (int) -> None + if 
self.pointer + length + 1 >= len(self.buffer): + self.update(length + 1) + while length != 0: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in '\n\x85\u2028\u2029' or ( + ch == '\r' and self.buffer[self.pointer] != '\n' + ): + self.line += 1 + self.column = 0 + elif ch != '\uFEFF': + self.column += 1 + length -= 1 + + def forward(self, length=1): + # type: (int) -> None + if self.pointer + length + 1 >= len(self.buffer): + self.update(length + 1) + while length != 0: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch == '\n' or (ch == '\r' and self.buffer[self.pointer] != '\n'): + self.line += 1 + self.column = 0 + elif ch != '\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + # type: () -> Any + if self.stream is None: + return StringMark( + self.name, self.index, self.line, self.column, self.buffer, self.pointer + ) + else: + return FileMark(self.name, self.index, self.line, self.column) + + def determine_encoding(self): + # type: () -> None + while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2): + self.update_raw() + if isinstance(self.raw_buffer, bytes): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode # type: ignore + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode # type: ignore + self.encoding = 'utf-16-be' + else: + self.raw_decode = codecs.utf_8_decode # type: ignore + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = RegExp( + '[^\x09\x0A\x0D\x20-\x7E\x85' '\xA0-\uD7FF' '\uE000-\uFFFD' '\U00010000-\U0010FFFF' ']' + ) + + _printable_ascii = ('\x09\x0A\x0D' + "".join(map(chr, range(0x20, 0x7F)))).encode('ascii') + + @classmethod + def _get_non_printable_ascii(cls, data): # type: ignore + # type: (Text, bytes) -> Optional[Tuple[int, Text]] + ascii_bytes = data.encode('ascii') # type: ignore + non_printables = ascii_bytes.translate(None, cls._printable_ascii) # type: ignore + if not non_printables: + return None + non_printable = non_printables[:1] + return ascii_bytes.index(non_printable), non_printable.decode('ascii') + + @classmethod + def _get_non_printable_regex(cls, data): + # type: (Text) -> Optional[Tuple[int, Text]] + match = cls.NON_PRINTABLE.search(data) + if not bool(match): + return None + return match.start(), match.group() + + @classmethod + def _get_non_printable(cls, data): + # type: (Text) -> Optional[Tuple[int, Text]] + try: + return cls._get_non_printable_ascii(data) # type: ignore + except UnicodeEncodeError: + return cls._get_non_printable_regex(data) + + def check_printable(self, data): + # type: (Any) -> None + non_printable_match = self._get_non_printable(data) + if non_printable_match is not None: + start, character = non_printable_match + position = self.index + (len(self.buffer) - self.pointer) + start + raise ReaderError( + self.name, + position, + ord(character), + 'unicode', + 'special characters are not allowed', + ) + + def update(self, length): + # type: (int) -> None + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer :] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof) + except UnicodeDecodeError as exc: + character = self.raw_buffer[exc.start] + if self.stream is not None: + position = self.stream_pointer - len(self.raw_buffer) 
+ exc.start + elif self.stream is not None: + position = self.stream_pointer - len(self.raw_buffer) + exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += '\0' + self.raw_buffer = None + break + + def update_raw(self, size=None): + # type: (Optional[int]) -> None + if size is None: + size = 4096 + data = self.stream.read(size) + if self.raw_buffer is None: + self.raw_buffer = data + else: + self.raw_buffer += data + self.stream_pointer += len(data) + if not data: + self.eof = True + + +# try: +# import psyco +# psyco.bind(Reader) +# except ImportError: +# pass diff --git a/pipenv/vendor/ruamel/yaml/representer.py b/pipenv/vendor/ruamel/yaml/representer.py new file mode 100644 index 0000000000..f1658951d7 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/representer.py @@ -0,0 +1,1156 @@ +# coding: utf-8 + +from pipenv.vendor.ruamel.yaml.error import * # NOQA +from pipenv.vendor.ruamel.yaml.nodes import * # NOQA +from pipenv.vendor.ruamel.yaml.compat import ordereddict +from pipenv.vendor.ruamel.yaml.compat import _F, nprint, nprintf # NOQA +from pipenv.vendor.ruamel.yaml.scalarstring import ( + LiteralScalarString, + FoldedScalarString, + SingleQuotedScalarString, + DoubleQuotedScalarString, + PlainScalarString, +) +from pipenv.vendor.ruamel.yaml.comments import ( + CommentedMap, + CommentedOrderedMap, + CommentedSeq, + CommentedKeySeq, + CommentedKeyMap, + CommentedSet, + comment_attrib, + merge_attrib, + TaggedScalar, +) +from pipenv.vendor.ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt +from pipenv.vendor.ruamel.yaml.scalarfloat import ScalarFloat +from pipenv.vendor.ruamel.yaml.scalarbool import ScalarBoolean +from pipenv.vendor.ruamel.yaml.timestamp import TimeStamp +from pipenv.vendor.ruamel.yaml.anchor import Anchor + +import datetime +import sys +import types + +import copyreg +import base64 + +if False: # MYPY + from typing import Dict, List, Any, Union, Text, Optional # NOQA + +# fmt: off +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError', 'RoundTripRepresenter'] +# fmt: on + + +class RepresenterError(YAMLError): + pass + + +class BaseRepresenter: + + yaml_representers = {} # type: Dict[Any, Any] + yaml_multi_representers = {} # type: Dict[Any, Any] + + def __init__(self, default_style=None, default_flow_style=None, dumper=None): + # type: (Any, Any, Any, Any) -> None + self.dumper = dumper + if self.dumper is not None: + self.dumper._representer = self + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} # type: Dict[Any, Any] + self.object_keeper = [] # type: List[Any] + self.alias_key = None # type: Optional[int] + self.sort_base_mapping_type_on_output = True + + @property + def serializer(self): + # type: () -> Any + try: + if hasattr(self.dumper, 'typ'): + return self.dumper.serializer + return self.dumper._serializer + except AttributeError: + return self # cyaml + + def represent(self, data): + # type: (Any) -> None + node = self.represent_data(data) + self.serializer.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent_data(self, data): + # type: (Any) -> Any + if self.ignore_aliases(data): + self.alias_key = None + else: + 
self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + # if node is None: + # raise RepresenterError( + # f"recursive objects are not allowed: {data!r}") + return node + # self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, str(data)) + # if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def represent_key(self, data): + # type: (Any) -> Any + """ + David Fraser: Extract a method to represent keys in mappings, so that + a subclass can choose not to quote them (for example) + used in represent_mapping + https://bitbucket.org/davidfraser/pyyaml/commits/d81df6eb95f20cac4a79eed95ae553b5c6f77b8c + """ + return self.represent_data(data) + + @classmethod + def add_representer(cls, data_type, representer): + # type: (Any, Any) -> None + if 'yaml_representers' not in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + cls.yaml_representers[data_type] = representer + + @classmethod + def add_multi_representer(cls, data_type, representer): + # type: (Any, Any) -> None + if 'yaml_multi_representers' not in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + + def represent_scalar(self, tag, value, style=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + if style is None: + style = self.default_style + comment = None + if style and style[0] in '|>': + comment = getattr(value, 'comment', None) + if comment: + comment = [None, [comment]] + node = ScalarNode(tag, value, style=style, comment=comment, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_omap(self, tag, omap, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item_key in omap: + item_val = omap[item_key] + node_item = self.represent_data({item_key: item_val}) + # if not (isinstance(node_item, ScalarNode) \ + # and not node_item.style): + # best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + 
node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = list(mapping.items()) + if self.sort_base_mapping_type_on_output: + try: + mapping = sorted(mapping) + except TypeError: + pass + for item_key, item_value in mapping: + node_key = self.represent_key(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + # type: (Any) -> bool + return False + + +class SafeRepresenter(BaseRepresenter): + def ignore_aliases(self, data): + # type: (Any) -> bool + # https://docs.python.org/3/reference/expressions.html#parenthesized-forms : + # "i.e. two occurrences of the empty tuple may or may not yield the same object" + # so "data is ()" should not be used + if data is None or (isinstance(data, tuple) and data == ()): + return True + if isinstance(data, (bytes, str, bool, int, float)): + return True + return False + + def represent_none(self, data): + # type: (Any) -> Any + return self.represent_scalar('tag:yaml.org,2002:null', 'null') + + def represent_str(self, data): + # type: (Any) -> Any + return self.represent_scalar('tag:yaml.org,2002:str', data) + + def represent_binary(self, data): + # type: (Any) -> Any + if hasattr(base64, 'encodebytes'): + data = base64.encodebytes(data).decode('ascii') + else: + # check py2 only? + data = base64.encodestring(data).decode('ascii') # type: ignore + return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|') + + def represent_bool(self, data, anchor=None): + # type: (Any, Optional[Any]) -> Any + try: + value = self.dumper.boolean_representation[bool(data)] + except AttributeError: + if data: + value = 'true' + else: + value = 'false' + return self.represent_scalar('tag:yaml.org,2002:bool', value, anchor=anchor) + + def represent_int(self, data): + # type: (Any) -> Any + return self.represent_scalar('tag:yaml.org,2002:int', str(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value * inf_value): + inf_value *= inf_value + + def represent_float(self, data): + # type: (Any) -> Any + if data != data or (data == 0.0 and data == 1.0): + value = '.nan' + elif data == self.inf_value: + value = '.inf' + elif data == -self.inf_value: + value = '-.inf' + else: + value = repr(data).lower() + if getattr(self.serializer, 'use_version', None) == (1, 1): + if '.' not in value and 'e' in value: + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag in YAML 1.1. We fix + # this by adding '.0' before the 'e' symbol. 
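+                # (a worked instance of the note above, assuming YAML 1.1
+                # output: '1e17' is rewritten to '1.0e17')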
+ value = value.replace('e', '.0e', 1) + return self.represent_scalar('tag:yaml.org,2002:float', value) + + def represent_list(self, data): + # type: (Any) -> Any + # pairs = (len(data) > 0 and isinstance(data, list)) + # if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + # if not pairs: + return self.represent_sequence('tag:yaml.org,2002:seq', data) + + # value = [] + # for item_key, item_value in data: + # value.append(self.represent_mapping('tag:yaml.org,2002:map', + # [(item_key, item_value)])) + # return SequenceNode('tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + # type: (Any) -> Any + return self.represent_mapping('tag:yaml.org,2002:map', data) + + def represent_ordereddict(self, data): + # type: (Any) -> Any + return self.represent_omap('tag:yaml.org,2002:omap', data) + + def represent_set(self, data): + # type: (Any) -> Any + value = {} # type: Dict[Any, None] + for key in data: + value[key] = None + return self.represent_mapping('tag:yaml.org,2002:set', value) + + def represent_date(self, data): + # type: (Any) -> Any + value = data.isoformat() + return self.represent_scalar('tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + # type: (Any) -> Any + value = data.isoformat(' ') + return self.represent_scalar('tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + # type: (Any, Any, Any, Any) -> Any + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + # type: (Any) -> None + raise RepresenterError(_F('cannot represent an object: {data!s}', data=data)) + + +SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary) + +SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(float, SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(ordereddict, SafeRepresenter.represent_ordereddict) + +if sys.version_info >= (2, 7): + import collections + + SafeRepresenter.add_representer( + collections.OrderedDict, SafeRepresenter.represent_ordereddict + ) + +SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined) + + +class Representer(SafeRepresenter): + def represent_complex(self, data): + # type: (Any) -> Any + if data.imag == 0.0: + data = repr(data.real) + elif data.real == 0.0: + data = _F('{data_imag!r}j', data_imag=data.imag) + elif data.imag > 0: + data = _F('{data_real!r}+{data_imag!r}j', data_real=data.real, data_imag=data.imag) + else: + data = _F('{data_real!r}{data_imag!r}j', data_real=data.real, data_imag=data.imag) + return self.represent_scalar('tag:yaml.org,2002:python/complex', data) + + 
def represent_tuple(self, data): + # type: (Any) -> Any + return self.represent_sequence('tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + # type: (Any) -> Any + try: + name = _F( + '{modname!s}.{qualname!s}', modname=data.__module__, qualname=data.__qualname__ + ) + except AttributeError: + # ToDo: check if this can be reached in Py3 + name = _F('{modname!s}.{name!s}', modname=data.__module__, name=data.__name__) + return self.represent_scalar('tag:yaml.org,2002:python/name:' + name, "") + + def represent_module(self, data): + # type: (Any) -> Any + return self.represent_scalar('tag:yaml.org,2002:python/module:' + data.__name__, "") + + def represent_object(self, data): + # type: (Any) -> Any + # We use __reduce__ API to save the data. data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copyreg.dispatch_table: # type: ignore + reduce = copyreg.dispatch_table[cls](data) # type: ignore + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError(_F('cannot represent object: {data!r}', data=data)) + reduce = (list(reduce) + [None] * 5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = 'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = 'tag:yaml.org,2002:python/object/apply:' + newobj = False + try: + function_name = _F( + '{fun!s}.{qualname!s}', fun=function.__module__, qualname=function.__qualname__ + ) + except AttributeError: + # ToDo: check if this can be reached in Py3 + function_name = _F( + '{fun!s}.{name!s}', fun=function.__module__, name=function.__name__ + ) + if not args and not listitems and not dictitems and isinstance(state, dict) and newobj: + return self.represent_mapping( + 'tag:yaml.org,2002:python/object:' + function_name, state + ) + if not listitems and not dictitems and isinstance(state, dict) and not state: + return self.represent_sequence(tag + function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag + function_name, value) + + +Representer.add_representer(complex, Representer.represent_complex) + +Representer.add_representer(tuple, Representer.represent_tuple) + +Representer.add_representer(type, Representer.represent_name) + +Representer.add_representer(types.FunctionType, Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name) + +Representer.add_representer(types.ModuleType, Representer.represent_module) + +Representer.add_multi_representer(object, Representer.represent_object) 
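+# A rough sketch of what represent_object produces for a plain instance
+# (illustrative only; 'Point' is an invented example, not part of this module):
+#
+#     class Point:
+#         def __init__(self, x, y):
+#             self.x, self.y = x, y
+#
+# Point(1, 2).__reduce_ex__(2) reduces to (__newobj__, (Point,), {'x': 1, 'y': 2},
+# None, None), so the dump becomes a mapping node tagged
+# 'tag:yaml.org,2002:python/object:__main__.Point' with the value {x: 1, y: 2}.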
+ +Representer.add_multi_representer(type, Representer.represent_name) + + +class RoundTripRepresenter(SafeRepresenter): + # need to add type here and write out the .comment + # in serializer and emitter + + def __init__(self, default_style=None, default_flow_style=None, dumper=None): + # type: (Any, Any, Any) -> None + if not hasattr(dumper, 'typ') and default_flow_style is None: + default_flow_style = False + SafeRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=dumper, + ) + + def ignore_aliases(self, data): + # type: (Any) -> bool + try: + if data.anchor is not None and data.anchor.value is not None: + return False + except AttributeError: + pass + return SafeRepresenter.ignore_aliases(self, data) + + def represent_none(self, data): + # type: (Any) -> Any + if len(self.represented_objects) == 0 and not self.serializer.use_explicit_start: + # this will be open ended (although it is not yet) + return self.represent_scalar('tag:yaml.org,2002:null', 'null') + return self.represent_scalar('tag:yaml.org,2002:null', "") + + def represent_literal_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = '|' + anchor = data.yaml_anchor(any=True) + tag = 'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + represent_preserved_scalarstring = represent_literal_scalarstring + + def represent_folded_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = '>' + anchor = data.yaml_anchor(any=True) + for fold_pos in reversed(getattr(data, 'fold_pos', [])): + if ( + data[fold_pos] == ' ' + and (fold_pos > 0 and not data[fold_pos - 1].isspace()) + and (fold_pos < len(data) and not data[fold_pos + 1].isspace()) + ): + data = data[:fold_pos] + '\a' + data[fold_pos:] + tag = 'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + def represent_single_quoted_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = "'" + anchor = data.yaml_anchor(any=True) + tag = 'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + def represent_double_quoted_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = '"' + anchor = data.yaml_anchor(any=True) + tag = 'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + def represent_plain_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = '' + anchor = data.yaml_anchor(any=True) + tag = 'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + def insert_underscore(self, prefix, s, underscore, anchor=None): + # type: (Any, Any, Any, Any) -> Any + if underscore is None: + return self.represent_scalar('tag:yaml.org,2002:int', prefix + s, anchor=anchor) + if underscore[0]: + sl = list(s) + pos = len(s) - underscore[0] + while pos > 0: + sl.insert(pos, '_') + pos -= underscore[0] + s = "".join(sl) + if underscore[1]: + s = '_' + s + if underscore[2]: + s += '_' + return self.represent_scalar('tag:yaml.org,2002:int', prefix + s, anchor=anchor) + + def represent_scalar_int(self, data): + # type: (Any) -> Any + if data._width is not None: + s = '{:0{}d}'.format(data, data._width) + else: + s = format(data, 'd') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore("", s, data._underscore, anchor=anchor) + + def represent_binary_int(self, data): + # type: (Any) -> Any + if data._width is not None: + # cannot use 
'{:#0{}b}', that strips the zeros + s = '{:0{}b}'.format(data, data._width) + else: + s = format(data, 'b') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore('0b', s, data._underscore, anchor=anchor) + + def represent_octal_int(self, data): + # type: (Any) -> Any + if data._width is not None: + # cannot use '{:#0{}o}', that strips the zeros + s = '{:0{}o}'.format(data, data._width) + else: + s = format(data, 'o') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore('0o', s, data._underscore, anchor=anchor) + + def represent_hex_int(self, data): + # type: (Any) -> Any + if data._width is not None: + # cannot use '{:#0{}x}', that strips the zeros + s = '{:0{}x}'.format(data, data._width) + else: + s = format(data, 'x') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore('0x', s, data._underscore, anchor=anchor) + + def represent_hex_caps_int(self, data): + # type: (Any) -> Any + if data._width is not None: + # cannot use '{:#0{}X}', that strips the zeros + s = '{:0{}X}'.format(data, data._width) + else: + s = format(data, 'X') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore('0x', s, data._underscore, anchor=anchor) + + def represent_scalar_float(self, data): + # type: (Any) -> Any + """ this is way more complicated """ + value = None + anchor = data.yaml_anchor(any=True) + if data != data or (data == 0.0 and data == 1.0): + value = '.nan' + elif data == self.inf_value: + value = '.inf' + elif data == -self.inf_value: + value = '-.inf' + if value: + return self.represent_scalar('tag:yaml.org,2002:float', value, anchor=anchor) + if data._exp is None and data._prec > 0 and data._prec == data._width - 1: + # no exponent, but trailing dot + value = '{}{:d}.'.format(data._m_sign if data._m_sign else "", abs(int(data))) + elif data._exp is None: + # no exponent, "normal" dot + prec = data._prec + ms = data._m_sign if data._m_sign else "" + # -1 for the dot + value = '{}{:0{}.{}f}'.format( + ms, abs(data), data._width - len(ms), data._width - prec - 1 + ) + if prec == 0 or (prec == 1 and ms != ""): + value = value.replace('0.', '.') + while len(value) < data._width: + value += '0' + else: + # exponent + m, es = '{:{}.{}e}'.format( + # data, data._width, data._width - data._prec + (1 if data._m_sign else 0) + data, + data._width, + data._width + (1 if data._m_sign else 0), + ).split('e') + w = data._width if data._prec > 0 else (data._width + 1) + if data < 0: + w += 1 + m = m[:w] + e = int(es) + m1, m2 = m.split('.') # always second? + while len(m1) + len(m2) < data._width - (1 if data._prec >= 0 else 0): + m2 += '0' + if data._m_sign and data > 0: + m1 = '+' + m1 + esgn = '+' if data._e_sign else "" + if data._prec < 0: # mantissa without dot + if m2 != '0': + e -= len(m2) + else: + m2 = "" + while (len(m1) + len(m2) - (1 if data._m_sign else 0)) < data._width: + m2 += '0' + e -= 1 + value = m1 + m2 + data._exp + '{:{}0{}d}'.format(e, esgn, data._e_width) + elif data._prec == 0: # mantissa with trailing dot + e -= len(m2) + value = m1 + m2 + '.' + data._exp + '{:{}0{}d}'.format(e, esgn, data._e_width) + else: + if data._m_lead0 > 0: + m2 = '0' * (data._m_lead0 - 1) + m1 + m2 + m1 = '0' + m2 = m2[: -data._m_lead0] # these should be zeros + e += data._m_lead0 + while len(m1) < data._prec: + m1 += m2[0] + m2 = m2[1:] + e -= 1 + value = m1 + '.' 
+ m2 + data._exp + '{:{}0{}d}'.format(e, esgn, data._e_width) + + if value is None: + value = repr(data).lower() + return self.represent_scalar('tag:yaml.org,2002:float', value, anchor=anchor) + + def represent_sequence(self, tag, sequence, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + # if the flow_style is None, the flow style tacked on to the object + # explicitly will be taken. If that is None as well the default flow + # style rules + try: + flow_style = sequence.fa.flow_style(flow_style) + except AttributeError: + flow_style = flow_style + try: + anchor = sequence.yaml_anchor() + except AttributeError: + anchor = None + node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + try: + comment = getattr(sequence, comment_attrib) + node.comment = comment.comment + # reset any comment already printed information + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + item_comments = comment.items + if node.comment is None: + node.comment = comment.comment + else: + # as we are potentially going to extend this, make a new list + node.comment = comment.comment[:] + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + for idx, item in enumerate(sequence): + node_item = self.represent_data(item) + self.merge_comments(node_item, item_comments.get(idx)) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if len(sequence) != 0 and self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def merge_comments(self, node, comments): + # type: (Any, Any) -> Any + if comments is None: + assert hasattr(node, 'comment') + return node + if getattr(node, 'comment', None) is not None: + for idx, val in enumerate(comments): + if idx >= len(node.comment): + continue + nc = node.comment[idx] + if nc is not None: + assert val is None or val == nc + comments[idx] = nc + node.comment = comments + return node + + def represent_key(self, data): + # type: (Any) -> Any + if isinstance(data, CommentedKeySeq): + self.alias_key = None + return self.represent_sequence('tag:yaml.org,2002:seq', data, flow_style=True) + if isinstance(data, CommentedKeyMap): + self.alias_key = None + return self.represent_mapping('tag:yaml.org,2002:map', data, flow_style=True) + return SafeRepresenter.represent_key(self, data) + + def represent_mapping(self, tag, mapping, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + try: + flow_style = mapping.fa.flow_style(flow_style) + except AttributeError: + flow_style = flow_style + try: + anchor = mapping.yaml_anchor() + except AttributeError: + anchor = None + node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + # no sorting! !! 
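+        # (unlike the SafeRepresenter variant, no sorting happens here, so a
+        # round-tripped mapping keeps its original key order and the comments
+        # recorded against each key stay attached to the right entries)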
+ try: + comment = getattr(mapping, comment_attrib) + if node.comment is None: + node.comment = comment.comment + else: + # as we are potentially going to extend this, make a new list + node.comment = comment.comment[:] + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + if self.dumper.comment_handling is None: + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + try: + node.comment.append(comment.end) + except AttributeError: + pass + else: + # NEWCMNT + pass + except AttributeError: + item_comments = {} + merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])] + try: + merge_pos = getattr(mapping, merge_attrib, [[0]])[0][0] + except IndexError: + merge_pos = 0 + item_count = 0 + if bool(merge_list): + items = mapping.non_merged_items() + else: + items = mapping.items() + for item_key, item_value in items: + item_count += 1 + node_key = self.represent_key(item_key) + node_value = self.represent_data(item_value) + item_comment = item_comments.get(item_key) + if item_comment: + # assert getattr(node_key, 'comment', None) is None + # issue 351 did throw this because the comment from the list item was + # moved to the dict + node_key.comment = item_comment[:2] + nvc = getattr(node_value, 'comment', None) + if nvc is not None: # end comment already there + nvc[0] = item_comment[2] + nvc[1] = item_comment[3] + else: + node_value.comment = item_comment[2:] + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if ((item_count != 0) or bool(merge_list)) and self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + if bool(merge_list): + # because of the call to represent_data here, the anchors + # are marked as being used and thereby created + if len(merge_list) == 1: + arg = self.represent_data(merge_list[0]) + else: + arg = self.represent_data(merge_list) + arg.flow_style = True + value.insert(merge_pos, (ScalarNode('tag:yaml.org,2002:merge', '<<'), arg)) + return node + + def represent_omap(self, tag, omap, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + try: + flow_style = omap.fa.flow_style(flow_style) + except AttributeError: + flow_style = flow_style + try: + anchor = omap.yaml_anchor() + except AttributeError: + anchor = None + node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + try: + comment = getattr(omap, comment_attrib) + if node.comment is None: + node.comment = comment.comment + else: + # as we are potentially going to extend this, make a new list + node.comment = comment.comment[:] + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + for item_key in omap: + item_val = omap[item_key] + node_item = self.represent_data({item_key: item_val}) + # node_item.flow_style = False + # node item has two scalars in value: node_key and node_value + item_comment = item_comments.get(item_key) + if item_comment: + if 
item_comment[1]: + node_item.comment = [None, item_comment[1]] + assert getattr(node_item.value[0][0], 'comment', None) is None + node_item.value[0][0].comment = [item_comment[0], None] + nvc = getattr(node_item.value[0][1], 'comment', None) + if nvc is not None: # end comment already there + nvc[0] = item_comment[2] + nvc[1] = item_comment[3] + else: + node_item.value[0][1].comment = item_comment[2:] + # if not (isinstance(node_item, ScalarNode) \ + # and not node_item.style): + # best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_set(self, setting): + # type: (Any) -> Any + flow_style = False + tag = 'tag:yaml.org,2002:set' + # return self.represent_mapping(tag, value) + value = [] # type: List[Any] + flow_style = setting.fa.flow_style(flow_style) + try: + anchor = setting.yaml_anchor() + except AttributeError: + anchor = None + node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + # no sorting! !! + try: + comment = getattr(setting, comment_attrib) + if node.comment is None: + node.comment = comment.comment + else: + # as we are potentially going to extend this, make a new list + node.comment = comment.comment[:] + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + for item_key in setting.odict: + node_key = self.represent_key(item_key) + node_value = self.represent_data(None) + item_comment = item_comments.get(item_key) + if item_comment: + assert getattr(node_key, 'comment', None) is None + node_key.comment = item_comment[:2] + node_key.style = node_value.style = '?' 
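+            # each set member is emitted as an explicit '?'-style mapping key
+            # with a null value, which is how YAML spells !!set entries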
+ if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + best_style = best_style + return node + + def represent_dict(self, data): + # type: (Any) -> Any + """write out tag if saved on loading""" + try: + t = data.tag.value + except AttributeError: + t = None + if t: + if t.startswith('!!'): + tag = 'tag:yaml.org,2002:' + t[2:] + else: + tag = t + else: + tag = 'tag:yaml.org,2002:map' + return self.represent_mapping(tag, data) + + def represent_list(self, data): + # type: (Any) -> Any + try: + t = data.tag.value + except AttributeError: + t = None + if t: + if t.startswith('!!'): + tag = 'tag:yaml.org,2002:' + t[2:] + else: + tag = t + else: + tag = 'tag:yaml.org,2002:seq' + return self.represent_sequence(tag, data) + + def represent_datetime(self, data): + # type: (Any) -> Any + inter = 'T' if data._yaml['t'] else ' ' + _yaml = data._yaml + if _yaml['delta']: + data += _yaml['delta'] + value = data.isoformat(inter) + else: + value = data.isoformat(inter) + if _yaml['tz']: + value += _yaml['tz'] + return self.represent_scalar('tag:yaml.org,2002:timestamp', value) + + def represent_tagged_scalar(self, data): + # type: (Any) -> Any + try: + tag = data.tag.value + except AttributeError: + tag = None + try: + anchor = data.yaml_anchor() + except AttributeError: + anchor = None + return self.represent_scalar(tag, data.value, style=data.style, anchor=anchor) + + def represent_scalar_bool(self, data): + # type: (Any) -> Any + try: + anchor = data.yaml_anchor() + except AttributeError: + anchor = None + return SafeRepresenter.represent_bool(self, data, anchor=anchor) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + # type: (Any, Any, Any, Optional[Any]) -> Any + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + anchor = state.pop(Anchor.attrib, None) + res = self.represent_mapping(tag, state, flow_style=flow_style) + if anchor is not None: + res.anchor = anchor + return res + + +RoundTripRepresenter.add_representer(type(None), RoundTripRepresenter.represent_none) + +RoundTripRepresenter.add_representer( + LiteralScalarString, RoundTripRepresenter.represent_literal_scalarstring +) + +RoundTripRepresenter.add_representer( + FoldedScalarString, RoundTripRepresenter.represent_folded_scalarstring +) + +RoundTripRepresenter.add_representer( + SingleQuotedScalarString, RoundTripRepresenter.represent_single_quoted_scalarstring +) + +RoundTripRepresenter.add_representer( + DoubleQuotedScalarString, RoundTripRepresenter.represent_double_quoted_scalarstring +) + +RoundTripRepresenter.add_representer( + PlainScalarString, RoundTripRepresenter.represent_plain_scalarstring +) + +# RoundTripRepresenter.add_representer(tuple, Representer.represent_tuple) + +RoundTripRepresenter.add_representer(ScalarInt, RoundTripRepresenter.represent_scalar_int) + +RoundTripRepresenter.add_representer(BinaryInt, RoundTripRepresenter.represent_binary_int) + +RoundTripRepresenter.add_representer(OctalInt, RoundTripRepresenter.represent_octal_int) + +RoundTripRepresenter.add_representer(HexInt, RoundTripRepresenter.represent_hex_int) + +RoundTripRepresenter.add_representer(HexCapsInt, RoundTripRepresenter.represent_hex_caps_int) + +RoundTripRepresenter.add_representer(ScalarFloat, RoundTripRepresenter.represent_scalar_float) + +RoundTripRepresenter.add_representer(ScalarBoolean, 
RoundTripRepresenter.represent_scalar_bool) + +RoundTripRepresenter.add_representer(CommentedSeq, RoundTripRepresenter.represent_list) + +RoundTripRepresenter.add_representer(CommentedMap, RoundTripRepresenter.represent_dict) + +RoundTripRepresenter.add_representer( + CommentedOrderedMap, RoundTripRepresenter.represent_ordereddict +) + +if sys.version_info >= (2, 7): + import collections + + RoundTripRepresenter.add_representer( + collections.OrderedDict, RoundTripRepresenter.represent_ordereddict + ) + +RoundTripRepresenter.add_representer(CommentedSet, RoundTripRepresenter.represent_set) + +RoundTripRepresenter.add_representer( + TaggedScalar, RoundTripRepresenter.represent_tagged_scalar +) + +RoundTripRepresenter.add_representer(TimeStamp, RoundTripRepresenter.represent_datetime) diff --git a/pipenv/vendor/ruamel/yaml/resolver.py b/pipenv/vendor/ruamel/yaml/resolver.py new file mode 100644 index 0000000000..2294e5b18d --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/resolver.py @@ -0,0 +1,405 @@ +# coding: utf-8 + +import re + +if False: # MYPY + from typing import Any, Dict, List, Union, Text, Optional # NOQA + from pipenv.vendor.ruamel.yaml.compat import VersionType # NOQA + +from pipenv.vendor.ruamel.yaml.compat import _DEFAULT_YAML_VERSION, _F # NOQA +from pipenv.vendor.ruamel.yaml.error import * # NOQA +from pipenv.vendor.ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode # NOQA +from pipenv.vendor.ruamel.yaml.util import RegExp # NOQA + +__all__ = ['BaseResolver', 'Resolver', 'VersionedResolver'] + + +# fmt: off +# resolvers consist of +# - a list of applicable version +# - a tag +# - a regexp +# - a list of first characters to match +implicit_resolvers = [ + ([(1, 2)], + 'tag:yaml.org,2002:bool', + RegExp('''^(?:true|True|TRUE|false|False|FALSE)$''', re.X), + list('tTfF')), + ([(1, 1)], + 'tag:yaml.org,2002:bool', + RegExp('''^(?:y|Y|yes|Yes|YES|n|N|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + list('yYnNtTfFoO')), + ([(1, 2)], + 'tag:yaml.org,2002:float', + RegExp('''^(?: + [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? + |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) + |[-+]?\\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?\\.(?:inf|Inf|INF) + |\\.(?:nan|NaN|NAN))$''', re.X), + list('-+0123456789.')), + ([(1, 1)], + 'tag:yaml.org,2002:float', + RegExp('''^(?: + [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? + |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) + |\\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]* # sexagesimal float + |[-+]?\\.(?:inf|Inf|INF) + |\\.(?:nan|NaN|NAN))$''', re.X), + list('-+0123456789.')), + ([(1, 2)], + 'tag:yaml.org,2002:int', + RegExp('''^(?:[-+]?0b[0-1_]+ + |[-+]?0o?[0-7_]+ + |[-+]?[0-9_]+ + |[-+]?0x[0-9a-fA-F_]+)$''', re.X), + list('-+0123456789')), + ([(1, 1)], + 'tag:yaml.org,2002:int', + RegExp('''^(?:[-+]?0b[0-1_]+ + |[-+]?0?[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), # sexagesimal int + list('-+0123456789')), + ([(1, 2), (1, 1)], + 'tag:yaml.org,2002:merge', + RegExp('^(?:<<)$'), + ['<']), + ([(1, 2), (1, 1)], + 'tag:yaml.org,2002:null', + RegExp('''^(?: ~ + |null|Null|NULL + | )$''', re.X), + ['~', 'n', 'N', '']), + ([(1, 2), (1, 1)], + 'tag:yaml.org,2002:timestamp', + RegExp('''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \\t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)? 
+ (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list('0123456789')), + ([(1, 2), (1, 1)], + 'tag:yaml.org,2002:value', + RegExp('^(?:=)$'), + ['=']), + # The following resolver is only for documentation purposes. It cannot work + # because plain scalars cannot start with '!', '&', or '*'. + ([(1, 2), (1, 1)], + 'tag:yaml.org,2002:yaml', + RegExp('^(?:!|&|\\*)$'), + list('!&*')), +] +# fmt: on + + +class ResolverError(YAMLError): + pass + + +class BaseResolver: + + DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} # type: Dict[Any, Any] + yaml_path_resolvers = {} # type: Dict[Any, Any] + + def __init__(self, loadumper=None): + # type: (Any, Any) -> None + self.loadumper = loadumper + if self.loadumper is not None and getattr(self.loadumper, '_resolver', None) is None: + self.loadumper._resolver = self.loadumper + self._loader_version = None # type: Any + self.resolver_exact_paths = [] # type: List[Any] + self.resolver_prefix_paths = [] # type: List[Any] + + @property + def parser(self): + # type: () -> Any + if self.loadumper is not None: + if hasattr(self.loadumper, 'typ'): + return self.loadumper.parser + return self.loadumper._parser + return None + + @classmethod + def add_implicit_resolver_base(cls, tag, regexp, first): + # type: (Any, Any, Any) -> None + if 'yaml_implicit_resolvers' not in cls.__dict__: + # deepcopy doesn't work here + cls.yaml_implicit_resolvers = dict( + (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers + ) + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + + @classmethod + def add_implicit_resolver(cls, tag, regexp, first): + # type: (Any, Any, Any) -> None + if 'yaml_implicit_resolvers' not in cls.__dict__: + # deepcopy doesn't work here + cls.yaml_implicit_resolvers = dict( + (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers + ) + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + implicit_resolvers.append(([(1, 2), (1, 1)], tag, regexp, first)) + + # @classmethod + # def add_implicit_resolver(cls, tag, regexp, first): + + @classmethod + def add_path_resolver(cls, tag, path, kind=None): + # type: (Any, Any, Any) -> None + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
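+        # A hypothetical registration (sketch only; the tag and path below are
+        # invented for illustration):
+        #
+        #     Resolver.add_path_resolver(
+        #         '!employee',
+        #         [(MappingNode, 'employees'), (SequenceNode, None)],
+        #         dict)
+        #
+        # tags every dict that appears as an item of the root mapping's
+        # 'employees' sequence with '!employee'.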
+ if 'yaml_path_resolvers' not in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] # type: List[Any] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError( + _F('Invalid path element: {element!s}', element=element) + ) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif ( + node_check not in [ScalarNode, SequenceNode, MappingNode] + and not isinstance(node_check, str) + and node_check is not None + ): + raise ResolverError( + _F('Invalid node checker: {node_check!s}', node_check=node_check) + ) + if not isinstance(index_check, (str, int)) and index_check is not None: + raise ResolverError( + _F('Invalid index checker: {index_check!s}', index_check=index_check) + ) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] and kind is not None: + raise ResolverError(_F('Invalid node kind: {kind!s}', kind=kind)) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + + def descend_resolver(self, current_node, current_index): + # type: (Any, Any) -> None + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + # type: () -> None + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, current_node, current_index): + # type: (int, Any, Any, Any, Any) -> bool + node_check, index_check = path[depth - 1] + if isinstance(node_check, str): + if current_node.tag != node_check: + return False + elif node_check is not None: + if not isinstance(current_node, node_check): + return False + if index_check is True and current_index is not None: + return False + if (index_check is False or index_check is None) and current_index is None: + return False + if isinstance(index_check, str): + if not ( + isinstance(current_index, ScalarNode) and index_check == current_index.value + ): + return False + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return False + return True + + def resolve(self, kind, value, implicit): + # type: (Any, Any, Any) -> Any + if kind is ScalarNode and implicit[0]: + if value == "": + resolvers = self.yaml_implicit_resolvers.get("", []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit 
= implicit[1] + if bool(self.yaml_path_resolvers): + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + + @property + def processing_version(self): + # type: () -> Any + return None + + +class Resolver(BaseResolver): + pass + + +for ir in implicit_resolvers: + if (1, 2) in ir[0]: + Resolver.add_implicit_resolver_base(*ir[1:]) + + +class VersionedResolver(BaseResolver): + """ + contrary to the "normal" resolver, the smart resolver delays loading + the pattern matching rules. That way it can decide to load 1.1 rules + or the (default) 1.2 rules, that no longer support octal without 0o, sexagesimals + and Yes/No/On/Off booleans. + """ + + def __init__(self, version=None, loader=None, loadumper=None): + # type: (Optional[VersionType], Any, Any) -> None + if loader is None and loadumper is not None: + loader = loadumper + BaseResolver.__init__(self, loader) + self._loader_version = self.get_loader_version(version) + self._version_implicit_resolver = {} # type: Dict[Any, Any] + + def add_version_implicit_resolver(self, version, tag, regexp, first): + # type: (VersionType, Any, Any, Any) -> None + if first is None: + first = [None] + impl_resolver = self._version_implicit_resolver.setdefault(version, {}) + for ch in first: + impl_resolver.setdefault(ch, []).append((tag, regexp)) + + def get_loader_version(self, version): + # type: (Optional[VersionType]) -> Any + if version is None or isinstance(version, tuple): + return version + if isinstance(version, list): + return tuple(version) + # assume string + return tuple(map(int, version.split('.'))) + + @property + def versioned_resolver(self): + # type: () -> Any + """ + select the resolver based on the version we are parsing + """ + version = self.processing_version + if isinstance(version, str): + version = tuple(map(int, version.split('.'))) + if version not in self._version_implicit_resolver: + for x in implicit_resolvers: + if version in x[0]: + self.add_version_implicit_resolver(version, x[1], x[2], x[3]) + return self._version_implicit_resolver[version] + + def resolve(self, kind, value, implicit): + # type: (Any, Any, Any) -> Any + if kind is ScalarNode and implicit[0]: + if value == "": + resolvers = self.versioned_resolver.get("", []) + else: + resolvers = self.versioned_resolver.get(value[0], []) + resolvers += self.versioned_resolver.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if bool(self.yaml_path_resolvers): + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + + @property + def processing_version(self): + # type: () -> Any + try: + version = self.loadumper._scanner.yaml_version + except AttributeError: + try: + if hasattr(self.loadumper, 'typ'): + version = self.loadumper.version + else: + version = self.loadumper._serializer.use_version # dumping + except AttributeError: + version = None + if version is None: + version = self._loader_version + if version is None: + version = _DEFAULT_YAML_VERSION + return 
version diff --git a/pipenv/vendor/ruamel/yaml/scalarbool.py b/pipenv/vendor/ruamel/yaml/scalarbool.py new file mode 100644 index 0000000000..3133ae5e27 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/scalarbool.py @@ -0,0 +1,47 @@ +# coding: utf-8 + +""" +You cannot subclass bool, and this is necessary for round-tripping anchored +bool values (and also if you want to preserve the original way of writing) + +bool.__bases__ is type 'int', so that is what is used as the basis for ScalarBoolean as well. + +You can use these in an if statement, but not when testing equivalence +""" + +from pipenv.vendor.ruamel.yaml.anchor import Anchor + +if False: # MYPY + from typing import Text, Any, Dict, List # NOQA + +__all__ = ['ScalarBoolean'] + + +class ScalarBoolean(int): + def __new__(cls, *args, **kw): + # type: (Any, Any, Any) -> Any + anchor = kw.pop('anchor', None) + b = int.__new__(cls, *args, **kw) + if anchor is not None: + b.yaml_set_anchor(anchor, always_dump=True) + return b + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump diff --git a/pipenv/vendor/ruamel/yaml/scalarfloat.py b/pipenv/vendor/ruamel/yaml/scalarfloat.py new file mode 100644 index 0000000000..1ed91e3f94 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/scalarfloat.py @@ -0,0 +1,124 @@ +# coding: utf-8 + +import sys +from pipenv.vendor.ruamel.yaml.anchor import Anchor + +if False: # MYPY + from typing import Text, Any, Dict, List # NOQA + +__all__ = ['ScalarFloat', 'ExponentialFloat', 'ExponentialCapsFloat'] + + +class ScalarFloat(float): + def __new__(cls, *args, **kw): + # type: (Any, Any, Any) -> Any + width = kw.pop('width', None) + prec = kw.pop('prec', None) + m_sign = kw.pop('m_sign', None) + m_lead0 = kw.pop('m_lead0', 0) + exp = kw.pop('exp', None) + e_width = kw.pop('e_width', None) + e_sign = kw.pop('e_sign', None) + underscore = kw.pop('underscore', None) + anchor = kw.pop('anchor', None) + v = float.__new__(cls, *args, **kw) + v._width = width + v._prec = prec + v._m_sign = m_sign + v._m_lead0 = m_lead0 + v._exp = exp + v._e_width = e_width + v._e_sign = e_sign + v._underscore = underscore + if anchor is not None: + v.yaml_set_anchor(anchor, always_dump=True) + return v + + def __iadd__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) + a + x = type(self)(self + a) + x._width = self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + return x + + def __ifloordiv__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) // a + x = type(self)(self // a) + x._width = self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + return x + + def __imul__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) * a + x = type(self)(self * a) + x._width = self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + x._prec = self._prec # check for others + return x + + def __ipow__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) ** a + x = type(self)(self ** a) + x._width = 
self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + return x + + def __isub__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) - a + x = type(self)(self - a) + x._width = self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + return x + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + def dump(self, out=sys.stdout): + # type: (Any) -> Any + out.write( + 'ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}, _:{}|{}, w:{}, s:{})\n'.format( + self, + self._width, # type: ignore + self._prec, # type: ignore + self._m_sign, # type: ignore + self._m_lead0, # type: ignore + self._underscore, # type: ignore + self._exp, # type: ignore + self._e_width, # type: ignore + self._e_sign, # type: ignore + ) + ) + + +class ExponentialFloat(ScalarFloat): + def __new__(cls, value, width=None, underscore=None): + # type: (Any, Any, Any) -> Any + return ScalarFloat.__new__(cls, value, width=width, underscore=underscore) + + +class ExponentialCapsFloat(ScalarFloat): + def __new__(cls, value, width=None, underscore=None): + # type: (Any, Any, Any) -> Any + return ScalarFloat.__new__(cls, value, width=width, underscore=underscore) diff --git a/pipenv/vendor/ruamel/yaml/scalarint.py b/pipenv/vendor/ruamel/yaml/scalarint.py new file mode 100644 index 0000000000..c655473c0e --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/scalarint.py @@ -0,0 +1,127 @@ +# coding: utf-8 + +from pipenv.vendor.ruamel.yaml.anchor import Anchor + +if False: # MYPY + from typing import Text, Any, Dict, List # NOQA + +__all__ = ['ScalarInt', 'BinaryInt', 'OctalInt', 'HexInt', 'HexCapsInt', 'DecimalInt'] + + +class ScalarInt(int): + def __new__(cls, *args, **kw): + # type: (Any, Any, Any) -> Any + width = kw.pop('width', None) + underscore = kw.pop('underscore', None) + anchor = kw.pop('anchor', None) + v = int.__new__(cls, *args, **kw) + v._width = width + v._underscore = underscore + if anchor is not None: + v.yaml_set_anchor(anchor, always_dump=True) + return v + + def __iadd__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self + a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __ifloordiv__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self // a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __imul__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self * a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __ipow__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self ** a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else 
None # type: ignore + ) # NOQA + return x + + def __isub__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self - a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + +class BinaryInt(ScalarInt): + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) + + +class OctalInt(ScalarInt): + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) + + +# mixed casing of A-F is not supported, when loading the first non digit +# determines the case + + +class HexInt(ScalarInt): + """uses lower case (a-f)""" + + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) + + +class HexCapsInt(ScalarInt): + """uses upper case (A-F)""" + + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) + + +class DecimalInt(ScalarInt): + """needed if anchor""" + + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) diff --git a/pipenv/vendor/ruamel/yaml/scalarstring.py b/pipenv/vendor/ruamel/yaml/scalarstring.py new file mode 100644 index 0000000000..1514be52ca --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/scalarstring.py @@ -0,0 +1,152 @@ +# coding: utf-8 + +from pipenv.vendor.ruamel.yaml.anchor import Anchor + +if False: # MYPY + from typing import Text, Any, Dict, List # NOQA + +__all__ = [ + 'ScalarString', + 'LiteralScalarString', + 'FoldedScalarString', + 'SingleQuotedScalarString', + 'DoubleQuotedScalarString', + 'PlainScalarString', + # PreservedScalarString is the old name, as it was the first to be preserved on rt, + # use LiteralScalarString instead + 'PreservedScalarString', +] + + +class ScalarString(str): + __slots__ = Anchor.attrib + + def __new__(cls, *args, **kw): + # type: (Any, Any) -> Any + anchor = kw.pop('anchor', None) + ret_val = str.__new__(cls, *args, **kw) + if anchor is not None: + ret_val.yaml_set_anchor(anchor, always_dump=True) + return ret_val + + def replace(self, old, new, maxreplace=-1): + # type: (Any, Any, int) -> Any + return type(self)((str.replace(self, old, new, maxreplace))) + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, 
Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + +class LiteralScalarString(ScalarString): + __slots__ = 'comment' # the comment after the | on the first line + + style = '|' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +PreservedScalarString = LiteralScalarString + + +class FoldedScalarString(ScalarString): + __slots__ = ('fold_pos', 'comment') # the comment after the > on the first line + + style = '>' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +class SingleQuotedScalarString(ScalarString): + __slots__ = () + + style = "'" + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +class DoubleQuotedScalarString(ScalarString): + __slots__ = () + + style = '"' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +class PlainScalarString(ScalarString): + __slots__ = () + + style = '' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +def preserve_literal(s): + # type: (Text) -> Text + return LiteralScalarString(s.replace('\r\n', '\n').replace('\r', '\n')) + + +def walk_tree(base, map=None): + # type: (Any, Any) -> None + """ + the routine here walks over a simple yaml tree (recursing in + dict values and list items) and converts strings that + have multiple lines to literal scalars + + You can also provide an explicit (ordered) mapping for multiple transforms + (first of which is executed): + map = ruamel.yaml.compat.ordereddict + map['\n'] = preserve_literal + map[':'] = SingleQuotedScalarString + walk_tree(data, map=map) + """ + from collections.abc import MutableMapping, MutableSequence + + if map is None: + map = {'\n': preserve_literal} + + if isinstance(base, MutableMapping): + for k in base: + v = base[k] # type: Text + if isinstance(v, str): + for ch in map: + if ch in v: + base[k] = map[ch](v) + break + else: + walk_tree(v, map=map) + elif isinstance(base, MutableSequence): + for idx, elem in enumerate(base): + if isinstance(elem, str): + for ch in map: + if ch in elem: + base[idx] = map[ch](elem) + break + else: + walk_tree(elem, map=map) diff --git a/pipenv/vendor/ruamel/yaml/scanner.py b/pipenv/vendor/ruamel/yaml/scanner.py new file mode 100644 index 0000000000..fb6d4a2bfe --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/scanner.py @@ -0,0 +1,2444 @@ +# coding: utf-8 + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# RoundTripScanner +# COMMENT(value) +# +# Read comments in the Scanner code for more details. 
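+# As a rough illustration, scanning the document 'a: 1' yields the token
+# sequence STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR(a), VALUE,
+# SCALAR(1), BLOCK-END, STREAM-END.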
+# + +import inspect +from pipenv.vendor.ruamel.yaml.error import MarkedYAMLError, CommentMark # NOQA +from pipenv.vendor.ruamel.yaml.tokens import * # NOQA +from pipenv.vendor.ruamel.yaml.compat import _F, check_anchorname_char, nprint, nprintf # NOQA + +if False: # MYPY + from typing import Any, Dict, Optional, List, Union, Text # NOQA + from pipenv.vendor.ruamel.yaml.compat import VersionType # NOQA + +__all__ = ['Scanner', 'RoundTripScanner', 'ScannerError'] + + +_THE_END = '\n\0\r\x85\u2028\u2029' +_THE_END_SPACE_TAB = ' \n\0\t\r\x85\u2028\u2029' +_SPACE_TAB = ' \t' + + +def xprintf(*args, **kw): + # type: (Any, Any) -> Any + return nprintf(*args, **kw) + pass + + +class ScannerError(MarkedYAMLError): + pass + + +class SimpleKey: + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + # type: (Any, Any, int, int, int, Any) -> None + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + + +class Scanner: + def __init__(self, loader=None): + # type: (Any) -> None + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer + + self.loader = loader + if self.loader is not None and getattr(self.loader, '_scanner', None) is None: + self.loader._scanner = self + self.reset_scanner() + self.first_time = False + self.yaml_version = None # type: Any + + @property + def flow_level(self): + # type: () -> int + return len(self.flow_context) + + def reset_scanner(self): + # type: () -> None + # Had we reached the end of the stream? + self.done = False + + # flow_context is an expanding/shrinking list consisting of '{' and '[' + # for each unclosed flow context. If empty list that means block context + self.flow_context = [] # type: List[Text] + + # List of processed tokens that are not yet emitted. + self.tokens = [] # type: List[Any] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] # type: List[int] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. 
The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} # type: Dict[Any, Any] + + @property + def reader(self): + # type: () -> Any + try: + return self._scanner_reader # type: ignore + except AttributeError: + if hasattr(self.loader, 'typ'): + self._scanner_reader = self.loader.reader + else: + self._scanner_reader = self.loader._reader + return self._scanner_reader + + @property + def scanner_processing_version(self): # prefix until un-composited + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.resolver.processing_version + return self.loader.processing_version + + # Public methods. + + def check_token(self, *choices): + # type: (Any) -> bool + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if len(self.tokens) > 0: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # type: () -> Any + # Return the next token, but do not delete if from the queue. + while self.need_more_tokens(): + self.fetch_more_tokens() + if len(self.tokens) > 0: + return self.tokens[0] + + def get_token(self): + # type: () -> Any + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if len(self.tokens) > 0: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + # type: () -> bool + if self.done: + return False + if len(self.tokens) == 0: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + return False + + def fetch_comment(self, comment): + # type: (Any) -> None + raise NotImplementedError + + def fetch_more_tokens(self): + # type: () -> Any + # Eat whitespaces and comments until we reach the next token. + comment = self.scan_to_next_token() + if comment is not None: # never happens for base scanner + return self.fetch_comment(comment) + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.reader.column) + + # Peek the next character. + ch = self.reader.peek() + + # Is it the end of stream? + if ch == '\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == '%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == '-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == '.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + # if ch == '\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == '[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == '{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? 
+ if ch == ']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? + if ch == '}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == ',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == '-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == '?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == ':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == '*': + return self.fetch_alias() + + # Is it an anchor? + if ch == '&': + return self.fetch_anchor() + + # Is it a tag? + if ch == '!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == '|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == '>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == "'": + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == '"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError( + 'while scanning for the next token', + None, + _F('found character {ch!r} that cannot start any token', ch=ch), + self.reader.get_mark(), + ) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # type: () -> Any + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # type: () -> None + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in list(self.possible_simple_keys): + key = self.possible_simple_keys[level] + if key.line != self.reader.line or self.reader.index - key.index > 1024: + if key.required: + raise ScannerError( + 'while scanning a simple key', + key.mark, + "could not find expected ':'", + self.reader.get_mark(), + ) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # type: () -> None + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.reader.column + + # The next token might be a simple key. Let's save it's number and + # position. 
+        if self.allow_simple_key:
+            self.remove_possible_simple_key()
+            token_number = self.tokens_taken + len(self.tokens)
+            key = SimpleKey(
+                token_number,
+                required,
+                self.reader.index,
+                self.reader.line,
+                self.reader.column,
+                self.reader.get_mark(),
+            )
+            self.possible_simple_keys[self.flow_level] = key
+
+    def remove_possible_simple_key(self):
+        # type: () -> None
+        # Remove the saved possible key position at the current flow level.
+        if self.flow_level in self.possible_simple_keys:
+            key = self.possible_simple_keys[self.flow_level]
+
+            if key.required:
+                raise ScannerError(
+                    'while scanning a simple key',
+                    key.mark,
+                    "could not find expected ':'",
+                    self.reader.get_mark(),
+                )
+
+            del self.possible_simple_keys[self.flow_level]
+
+    # Indentation functions.
+
+    def unwind_indent(self, column):
+        # type: (Any) -> None
+        # In flow context, tokens should respect indentation.
+        # Actually the condition should be `self.indent >= column` according to
+        # the spec. But this condition will prohibit intuitively correct
+        # constructions such as
+        # key : {
+        # }
+        # ####
+        # if self.flow_level and self.indent > column:
+        #     raise ScannerError(None, None,
+        #             "invalid indentation or unclosed '[' or '{'",
+        #             self.reader.get_mark())
+
+        # In the flow context, indentation is ignored. We make the scanner less
+        # restrictive than the specification requires.
+        if bool(self.flow_level):
+            return
+
+        # In block context, we may need to issue the BLOCK-END tokens.
+        while self.indent > column:
+            mark = self.reader.get_mark()
+            self.indent = self.indents.pop()
+            self.tokens.append(BlockEndToken(mark, mark))
+
+    def add_indent(self, column):
+        # type: (int) -> bool
+        # Check if we need to increase indentation.
+        if self.indent < column:
+            self.indents.append(self.indent)
+            self.indent = column
+            return True
+        return False
+
+    # Fetchers.
+
+    def fetch_stream_start(self):
+        # type: () -> None
+        # We always add STREAM-START as the first token and STREAM-END as the
+        # last token.
+        # Read the token.
+        mark = self.reader.get_mark()
+        # Add STREAM-START.
+        self.tokens.append(StreamStartToken(mark, mark, encoding=self.reader.encoding))
+
+    def fetch_stream_end(self):
+        # type: () -> None
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+        self.possible_simple_keys = {}
+        # Read the token.
+        mark = self.reader.get_mark()
+        # Add STREAM-END.
+        self.tokens.append(StreamEndToken(mark, mark))
+        # The stream is finished.
+        self.done = True
+
+    def fetch_directive(self):
+        # type: () -> None
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Scan and add DIRECTIVE.
+        self.tokens.append(self.scan_directive())
+
+    def fetch_document_start(self):
+        # type: () -> None
+        self.fetch_document_indicator(DocumentStartToken)
+
+    def fetch_document_end(self):
+        # type: () -> None
+        self.fetch_document_indicator(DocumentEndToken)
+
+    def fetch_document_indicator(self, TokenClass):
+        # type: (Any) -> None
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys. Note that there cannot be a block collection
+        # after '---'.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Add DOCUMENT-START or DOCUMENT-END.
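+        # (Both '---' and '...' are exactly three characters, hence the
+        # forward(3) below.)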
+        start_mark = self.reader.get_mark()
+        self.reader.forward(3)
+        end_mark = self.reader.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_start(self):
+        # type: () -> None
+        self.fetch_flow_collection_start(FlowSequenceStartToken, to_push='[')
+
+    def fetch_flow_mapping_start(self):
+        # type: () -> None
+        self.fetch_flow_collection_start(FlowMappingStartToken, to_push='{')
+
+    def fetch_flow_collection_start(self, TokenClass, to_push):
+        # type: (Any, Text) -> None
+        # '[' and '{' may start a simple key.
+        self.save_possible_simple_key()
+        # Increase the flow level.
+        self.flow_context.append(to_push)
+        # Simple keys are allowed after '[' and '{'.
+        self.allow_simple_key = True
+        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+        start_mark = self.reader.get_mark()
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_end(self):
+        # type: () -> None
+        self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+    def fetch_flow_mapping_end(self):
+        # type: () -> None
+        self.fetch_flow_collection_end(FlowMappingEndToken)
+
+    def fetch_flow_collection_end(self, TokenClass):
+        # type: (Any) -> None
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+        # Decrease the flow level.
+        try:
+            popped = self.flow_context.pop()  # NOQA
+        except IndexError:
+            # We must not be in a list or object.
+            # Defer error handling to the parser.
+            pass
+        # No simple keys after ']' or '}'.
+        self.allow_simple_key = False
+        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+        start_mark = self.reader.get_mark()
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_entry(self):
+        # type: () -> None
+        # Simple keys are allowed after ','.
+        self.allow_simple_key = True
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+        # Add FLOW-ENTRY.
+        start_mark = self.reader.get_mark()
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+    def fetch_block_entry(self):
+        # type: () -> None
+        # Block context needs additional checks.
+        if not self.flow_level:
+            # Are we allowed to start a new entry?
+            if not self.allow_simple_key:
+                raise ScannerError(
+                    None, None, 'sequence entries are not allowed here', self.reader.get_mark()
+                )
+            # We may need to add BLOCK-SEQUENCE-START.
+            if self.add_indent(self.reader.column):
+                mark = self.reader.get_mark()
+                self.tokens.append(BlockSequenceStartToken(mark, mark))
+        # It's an error for the block entry to occur in the flow context,
+        # but we let the parser detect this.
+        else:
+            pass
+        # Simple keys are allowed after '-'.
+        self.allow_simple_key = True
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add BLOCK-ENTRY.
+        start_mark = self.reader.get_mark()
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+    def fetch_key(self):
+        # type: () -> None
+        # Block context needs additional checks.
+        if not self.flow_level:
+
+            # Are we allowed to start a key (not necessarily a simple one)?
+            if not self.allow_simple_key:
+                raise ScannerError(
+                    None, None, 'mapping keys are not allowed here', self.reader.get_mark()
+                )
+
+            # We may need to add BLOCK-MAPPING-START.
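+            # e.g. an explicit '? key' line that opens a mapping at a
+            # deeper indentation level than the current block context.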
+ if self.add_indent(self.reader.column): + mark = self.reader.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + # type: () -> None + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert( + key.token_number - self.tokens_taken, KeyToken(key.mark, key.mark) + ) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert( + key.token_number - self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark), + ) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be caught by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError( + None, + None, + 'mapping values are not allowed here', + self.reader.get_mark(), + ) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.reader.column): + mark = self.reader.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + # type: () -> None + # ALIAS could be a simple key. + self.save_possible_simple_key() + # No simple keys after ALIAS. + self.allow_simple_key = False + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + # type: () -> None + # ANCHOR could start a simple key. + self.save_possible_simple_key() + # No simple keys after ANCHOR. + self.allow_simple_key = False + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + # type: () -> None + # TAG could start a simple key. + self.save_possible_simple_key() + # No simple keys after TAG. + self.allow_simple_key = False + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + # type: () -> None + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + # type: () -> None + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + # type: (Any) -> None + # A simple key may follow a block scalar. + self.allow_simple_key = True + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + # Scan and add SCALAR. 
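+        # style is '|' (literal) or '>' (folded) here.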
+ self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + # type: () -> None + self.fetch_flow_scalar(style="'") + + def fetch_double(self): + # type: () -> None + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + # type: (Any) -> None + # A flow scalar could be a simple key. + self.save_possible_simple_key() + # No simple keys after flow scalars. + self.allow_simple_key = False + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + # type: () -> None + # A plain scalar could be a simple key. + self.save_possible_simple_key() + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + # Scan and add SCALAR. May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + # type: () -> Any + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.reader.column == 0: + return True + return None + + def check_document_start(self): + # type: () -> Any + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.reader.column == 0: + if self.reader.prefix(3) == '---' and self.reader.peek(3) in _THE_END_SPACE_TAB: + return True + return None + + def check_document_end(self): + # type: () -> Any + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.reader.column == 0: + if self.reader.prefix(3) == '...' and self.reader.peek(3) in _THE_END_SPACE_TAB: + return True + return None + + def check_block_entry(self): + # type: () -> Any + # BLOCK-ENTRY: '-' (' '|'\n') + return self.reader.peek(1) in _THE_END_SPACE_TAB + + def check_key(self): + # type: () -> Any + # KEY(flow context): '?' + if bool(self.flow_level): + return True + # KEY(block context): '?' (' '|'\n') + return self.reader.peek(1) in _THE_END_SPACE_TAB + + def check_value(self): + # type: () -> Any + # VALUE(flow context): ':' + if self.scanner_processing_version == (1, 1): + if bool(self.flow_level): + return True + else: + if bool(self.flow_level): + if self.flow_context[-1] == '[': + if self.reader.peek(1) not in _THE_END_SPACE_TAB: + return False + elif self.tokens and isinstance(self.tokens[-1], ValueToken): + # mapping flow context scanning a value token + if self.reader.peek(1) not in _THE_END_SPACE_TAB: + return False + return True + # VALUE(block context): ':' (' '|'\n') + return self.reader.peek(1) in _THE_END_SPACE_TAB + + def check_plain(self): + # type: () -> Any + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + srp = self.reader.peek + ch = srp() + if self.scanner_processing_version == (1, 1): + return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`' or ( + srp(1) not in _THE_END_SPACE_TAB + and (ch == '-' or (not self.flow_level and ch in '?:')) + ) + # YAML 1.2 + if ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`': + # ################### ^ ??? 
+ return True + ch1 = srp(1) + if ch == '-' and ch1 not in _THE_END_SPACE_TAB: + return True + if ch == ':' and bool(self.flow_level) and ch1 not in _SPACE_TAB: + return True + + return srp(1) not in _THE_END_SPACE_TAB and ( + ch == '-' or (not self.flow_level and ch in '?:') + ) + + # Scanners. + + def scan_to_next_token(self): + # type: () -> Any + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. + # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + srp = self.reader.peek + srf = self.reader.forward + if self.reader.index == 0 and srp() == '\uFEFF': + srf() + found = False + _the_end = _THE_END + while not found: + while srp() == ' ': + srf() + if srp() == '#': + while srp() not in _the_end: + srf() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + return None + + def scan_directive(self): + # type: () -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + start_mark = self.reader.get_mark() + srf() + name = self.scan_directive_name(start_mark) + value = None + if name == 'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.reader.get_mark() + elif name == 'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.reader.get_mark() + else: + end_mark = self.reader.get_mark() + while srp() not in _THE_END: + srf() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + length = 0 + srp = self.reader.peek + ch = srp(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_:.': + length += 1 + ch = srp(length) + if not length: + raise ScannerError( + 'while scanning a directive', + start_mark, + _F('expected alphabetic or numeric character, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + value = self.reader.prefix(length) + self.reader.forward(length) + ch = srp() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a directive', + start_mark, + _F('expected alphabetic or numeric character, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + return value + + def scan_yaml_directive_value(self, start_mark): + # type: (Any) -> Any + # See the specification for details. 
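+        # e.g. the directive line '%YAML 1.2' yields the tuple (1, 2).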
+ srp = self.reader.peek + srf = self.reader.forward + while srp() == ' ': + srf() + major = self.scan_yaml_directive_number(start_mark) + if srp() != '.': + raise ScannerError( + 'while scanning a directive', + start_mark, + _F("expected a digit or '.', but found {srp_call!r}", srp_call=srp()), + self.reader.get_mark(), + ) + srf() + minor = self.scan_yaml_directive_number(start_mark) + if srp() not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a directive', + start_mark, + _F("expected a digit or '.', but found {srp_call!r}", srp_call=srp()), + self.reader.get_mark(), + ) + self.yaml_version = (major, minor) + return self.yaml_version + + def scan_yaml_directive_number(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + ch = srp() + if not ('0' <= ch <= '9'): + raise ScannerError( + 'while scanning a directive', + start_mark, + _F('expected a digit, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + length = 0 + while '0' <= srp(length) <= '9': + length += 1 + value = int(self.reader.prefix(length)) + srf(length) + return value + + def scan_tag_directive_value(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + while srp() == ' ': + srf() + handle = self.scan_tag_directive_handle(start_mark) + while srp() == ' ': + srf() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.reader.peek() + if ch != ' ': + raise ScannerError( + 'while scanning a directive', + start_mark, + _F("expected ' ', but found {ch!r}", ch=ch), + self.reader.get_mark(), + ) + return value + + def scan_tag_directive_prefix(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.reader.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a directive', + start_mark, + _F("expected ' ', but found {ch!r}", ch=ch), + self.reader.get_mark(), + ) + return value + + def scan_directive_ignored_line(self, start_mark): + # type: (Any) -> None + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + while srp() == ' ': + srf() + if srp() == '#': + while srp() not in _THE_END: + srf() + ch = srp() + if ch not in _THE_END: + raise ScannerError( + 'while scanning a directive', + start_mark, + _F('expected a comment or a line break, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # type: (Any) -> Any + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. 
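+        # (The character check below is delegated to check_anchorname_char()
+        # from compat; the original ASCII-only loop is kept, commented out,
+        # for reference.)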
+ srp = self.reader.peek + start_mark = self.reader.get_mark() + indicator = srp() + if indicator == '*': + name = 'alias' + else: + name = 'anchor' + self.reader.forward() + length = 0 + ch = srp(length) + # while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + # or ch in '-_': + while check_anchorname_char(ch): + length += 1 + ch = srp(length) + if not length: + raise ScannerError( + _F('while scanning an {name!s}', name=name), + start_mark, + _F('expected alphabetic or numeric character, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + value = self.reader.prefix(length) + self.reader.forward(length) + # ch1 = ch + # ch = srp() # no need to peek, ch is already set + # assert ch1 == ch + if ch not in '\0 \t\r\n\x85\u2028\u2029?:,[]{}%@`': + raise ScannerError( + _F('while scanning an {name!s}', name=name), + start_mark, + _F('expected alphabetic or numeric character, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + end_mark = self.reader.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # type: () -> Any + # See the specification for details. + srp = self.reader.peek + start_mark = self.reader.get_mark() + ch = srp(1) + if ch == '<': + handle = None + self.reader.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if srp() != '>': + raise ScannerError( + 'while parsing a tag', + start_mark, + _F("expected '>', but found {srp_call!r}", srp_call=srp()), + self.reader.get_mark(), + ) + self.reader.forward() + elif ch in _THE_END_SPACE_TAB: + handle = None + suffix = '!' + self.reader.forward() + else: + length = 1 + use_handle = False + while ch not in '\0 \r\n\x85\u2028\u2029': + if ch == '!': + use_handle = True + break + length += 1 + ch = srp(length) + handle = '!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = '!' + self.reader.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = srp() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a tag', + start_mark, + _F("expected ' ', but found {ch!r}", ch=ch), + self.reader.get_mark(), + ) + value = (handle, suffix) + end_mark = self.reader.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style, rt=False): + # type: (Any, Optional[bool]) -> Any + # See the specification for details. + srp = self.reader.peek + if style == '>': + folded = True + else: + folded = False + + chunks = [] # type: List[Any] + start_mark = self.reader.get_mark() + + # Scan the header. + self.reader.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + # block scalar comment e.g. : |+ # comment text + block_scalar_comment = self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent + 1 + if increment is None: + # no increment and top level, min_indent could be 0 + if min_indent < 1 and ( + style not in '|>' + or (self.scanner_processing_version == (1, 1)) + and getattr( + self.loader, 'top_level_block_style_scalar_no_indent_error_1_1', False + ) + ): + min_indent = 1 + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + if min_indent < 1: + min_indent = 1 + indent = min_indent + increment - 1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = "" + + # Scan the inner part of the block scalar. 
+ while self.reader.column == indent and srp() != '\0': + chunks.extend(breaks) + leading_non_space = srp() not in ' \t' + length = 0 + while srp(length) not in _THE_END: + length += 1 + chunks.append(self.reader.prefix(length)) + self.reader.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if style in '|>' and min_indent == 0: + # at the beginning of a line, if in block style see if + # end of document/start_new_document + if self.check_document_start() or self.check_document_end(): + break + if self.reader.column == indent and srp() != '\0': + + # Unfortunately, folding rules are ambiguous. + # + # This is the folding according to the specification: + + if rt and folded and line_break == '\n': + chunks.append('\a') + if folded and line_break == '\n' and leading_non_space and srp() not in ' \t': + if not breaks: + chunks.append(' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + # if folded and line_break == '\n': + # if not breaks: + # if srp() not in ' \t': + # chunks.append(' ') + # else: + # chunks.append(line_break) + # else: + # chunks.append(line_break) + else: + break + + # Process trailing line breaks. The 'chomping' setting determines + # whether they are included in the value. + trailing = [] # type: List[Any] + if chomping in [None, True]: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + elif chomping in [None, False]: + trailing.extend(breaks) + + # We are done. + token = ScalarToken("".join(chunks), False, start_mark, end_mark, style) + if self.loader is not None: + comment_handler = getattr(self.loader, 'comment_handling', False) + if comment_handler is None: + if block_scalar_comment is not None: + token.add_pre_comments([block_scalar_comment]) + if len(trailing) > 0: + # Eat whitespaces and comments until we reach the next token. + if self.loader is not None: + comment_handler = getattr(self.loader, 'comment_handling', None) + if comment_handler is not None: + line = end_mark.line - len(trailing) + for x in trailing: + assert x[-1] == '\n' + self.comments.add_blank_line(x, 0, line) # type: ignore + line += 1 + comment = self.scan_to_next_token() + while comment: + trailing.append(' ' * comment[1].column + comment[0]) + comment = self.scan_to_next_token() + if self.loader is not None: + comment_handler = getattr(self.loader, 'comment_handling', False) + if comment_handler is None: + # Keep track of the trailing whitespace and following comments + # as a comment token, if isn't all included in the actual value. + comment_end_mark = self.reader.get_mark() + comment = CommentToken("".join(trailing), end_mark, comment_end_mark) + token.add_post_comment(comment) + return token + + def scan_block_scalar_indicators(self, start_mark): + # type: (Any) -> Any + # See the specification for details. 
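+        # e.g. the header '|+2' yields (chomping=True, increment=2),
+        # '>-' yields (False, None) and a bare '|' yields (None, None).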
+        srp = self.reader.peek
+        chomping = None
+        increment = None
+        ch = srp()
+        if ch in '+-':
+            if ch == '+':
+                chomping = True
+            else:
+                chomping = False
+            self.reader.forward()
+            ch = srp()
+            if ch in '0123456789':
+                increment = int(ch)
+                if increment == 0:
+                    raise ScannerError(
+                        'while scanning a block scalar',
+                        start_mark,
+                        'expected indentation indicator in the range 1-9, but found 0',
+                        self.reader.get_mark(),
+                    )
+                self.reader.forward()
+        elif ch in '0123456789':
+            increment = int(ch)
+            if increment == 0:
+                raise ScannerError(
+                    'while scanning a block scalar',
+                    start_mark,
+                    'expected indentation indicator in the range 1-9, but found 0',
+                    self.reader.get_mark(),
+                )
+            self.reader.forward()
+            ch = srp()
+            if ch in '+-':
+                if ch == '+':
+                    chomping = True
+                else:
+                    chomping = False
+                self.reader.forward()
+        ch = srp()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError(
+                'while scanning a block scalar',
+                start_mark,
+                _F('expected chomping or indentation indicators, but found {ch!r}', ch=ch),
+                self.reader.get_mark(),
+            )
+        return chomping, increment
+
+    def scan_block_scalar_ignored_line(self, start_mark):
+        # type: (Any) -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        prefix = ''
+        comment = None
+        while srp() == ' ':
+            prefix += srp()
+            srf()
+        if srp() == '#':
+            comment = prefix
+            while srp() not in _THE_END:
+                comment += srp()
+                srf()
+        ch = srp()
+        if ch not in _THE_END:
+            raise ScannerError(
+                'while scanning a block scalar',
+                start_mark,
+                _F('expected a comment or a line break, but found {ch!r}', ch=ch),
+                self.reader.get_mark(),
+            )
+        self.scan_line_break()
+        return comment
+
+    def scan_block_scalar_indentation(self):
+        # type: () -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        chunks = []
+        max_indent = 0
+        end_mark = self.reader.get_mark()
+        while srp() in ' \r\n\x85\u2028\u2029':
+            if srp() != ' ':
+                chunks.append(self.scan_line_break())
+                end_mark = self.reader.get_mark()
+            else:
+                srf()
+                if self.reader.column > max_indent:
+                    max_indent = self.reader.column
+        return chunks, max_indent, end_mark
+
+    def scan_block_scalar_breaks(self, indent):
+        # type: (int) -> Any
+        # See the specification for details.
+        chunks = []
+        srp = self.reader.peek
+        srf = self.reader.forward
+        end_mark = self.reader.get_mark()
+        while self.reader.column < indent and srp() == ' ':
+            srf()
+        while srp() in '\r\n\x85\u2028\u2029':
+            chunks.append(self.scan_line_break())
+            end_mark = self.reader.get_mark()
+            while self.reader.column < indent and srp() == ' ':
+                srf()
+        return chunks, end_mark
+
+    def scan_flow_scalar(self, style):
+        # type: (Any) -> Any
+        # See the specification for details.
+        # Note that we loosen the indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation because " and ' clearly
+        # mark the beginning and the end of them. Therefore we are less
+        # restrictive than the specification requires. We only need to check
+        # that document separators are not included in scalars.
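+        # e.g. the single-quoted scalar 'it''s' scans to it's, while the
+        # double-quoted scalar "a\nb" scans with a real newline (escapes
+        # are handled in scan_flow_scalar_non_spaces below).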
+ if style == '"': + double = True + else: + double = False + srp = self.reader.peek + chunks = [] # type: List[Any] + start_mark = self.reader.get_mark() + quote = srp() + self.reader.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while srp() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.reader.forward() + end_mark = self.reader.get_mark() + return ScalarToken("".join(chunks), False, start_mark, end_mark, style) + + ESCAPE_REPLACEMENTS = { + '0': '\0', + 'a': '\x07', + 'b': '\x08', + 't': '\x09', + '\t': '\x09', + 'n': '\x0A', + 'v': '\x0B', + 'f': '\x0C', + 'r': '\x0D', + 'e': '\x1B', + ' ': '\x20', + '"': '"', + '/': '/', # as per http://www.json.org/ + '\\': '\\', + 'N': '\x85', + '_': '\xA0', + 'L': '\u2028', + 'P': '\u2029', + } + + ESCAPE_CODES = {'x': 2, 'u': 4, 'U': 8} + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + chunks = [] # type: List[Any] + srp = self.reader.peek + srf = self.reader.forward + while True: + length = 0 + while srp(length) not in ' \n\'"\\\0\t\r\x85\u2028\u2029': + length += 1 + if length != 0: + chunks.append(self.reader.prefix(length)) + srf(length) + ch = srp() + if not double and ch == "'" and srp(1) == "'": + chunks.append("'") + srf(2) + elif (double and ch == "'") or (not double and ch in '"\\'): + chunks.append(ch) + srf() + elif double and ch == '\\': + srf() + ch = srp() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + srf() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + srf() + for k in range(length): + if srp(k) not in '0123456789ABCDEFabcdef': + raise ScannerError( + 'while scanning a double-quoted scalar', + start_mark, + _F( + 'expected escape sequence of {length:d} hexdecimal ' + 'numbers, but found {srp_call!r}', + length=length, + srp_call=srp(k), + ), + self.reader.get_mark(), + ) + code = int(self.reader.prefix(length), 16) + chunks.append(chr(code)) + srf(length) + elif ch in '\n\r\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError( + 'while scanning a double-quoted scalar', + start_mark, + _F('found unknown escape character {ch!r}', ch=ch), + self.reader.get_mark(), + ) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + srp = self.reader.peek + chunks = [] + length = 0 + while srp(length) in ' \t': + length += 1 + whitespaces = self.reader.prefix(length) + self.reader.forward(length) + ch = srp() + if ch == '\0': + raise ScannerError( + 'while scanning a quoted scalar', + start_mark, + 'found unexpected end of stream', + self.reader.get_mark(), + ) + elif ch in '\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != '\n': + chunks.append(line_break) + elif not breaks: + chunks.append(' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + chunks = [] # type: List[Any] + srp = self.reader.peek + srf = self.reader.forward + while True: + # Instead of checking indentation, we check for document + # separators. 
+ prefix = self.reader.prefix(3) + if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB: + raise ScannerError( + 'while scanning a quoted scalar', + start_mark, + 'found unexpected document separator', + self.reader.get_mark(), + ) + while srp() in ' \t': + srf() + if srp() in '\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # type: () -> Any + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ': ' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. + srp = self.reader.peek + srf = self.reader.forward + chunks = [] # type: List[Any] + start_mark = self.reader.get_mark() + end_mark = start_mark + indent = self.indent + 1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + # if indent == 0: + # indent = 1 + spaces = [] # type: List[Any] + while True: + length = 0 + if srp() == '#': + break + while True: + ch = srp(length) + if ch == ':' and srp(length + 1) not in _THE_END_SPACE_TAB: + pass + elif ch == '?' and self.scanner_processing_version != (1, 1): + pass + elif ( + ch in _THE_END_SPACE_TAB + or ( + not self.flow_level + and ch == ':' + and srp(length + 1) in _THE_END_SPACE_TAB + ) + or (self.flow_level and ch in ',:?[]{}') + ): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if ( + self.flow_level + and ch == ':' + and srp(length + 1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}' + ): + srf(length) + raise ScannerError( + 'while scanning a plain scalar', + start_mark, + "found unexpected ':'", + self.reader.get_mark(), + 'Please check ' + 'http://pyyaml.org/wiki/YAMLColonInFlowContext ' + 'for details.', + ) + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.reader.prefix(length)) + srf(length) + end_mark = self.reader.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if ( + not spaces + or srp() == '#' + or (not self.flow_level and self.reader.column < indent) + ): + break + + token = ScalarToken("".join(chunks), True, start_mark, end_mark) + # getattr provides True so C type loader, which cannot handle comment, + # will not make CommentToken + if self.loader is not None: + comment_handler = getattr(self.loader, 'comment_handling', False) + if comment_handler is None: + if spaces and spaces[0] == '\n': + # Create a comment token to preserve the trailing line breaks. + comment = CommentToken("".join(spaces) + '\n', start_mark, end_mark) + token.add_post_comment(comment) + elif comment_handler is not False: + line = start_mark.line + 1 + for ch in spaces: + if ch == '\n': + self.comments.add_blank_line('\n', 0, line) # type: ignore + line += 1 + + return token + + def scan_plain_spaces(self, indent, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! 
+ srp = self.reader.peek + srf = self.reader.forward + chunks = [] + length = 0 + while srp(length) in ' ': + length += 1 + whitespaces = self.reader.prefix(length) + self.reader.forward(length) + ch = srp() + if ch in '\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.reader.prefix(3) + if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB: + return + breaks = [] + while srp() in ' \r\n\x85\u2028\u2029': + if srp() == ' ': + srf() + else: + breaks.append(self.scan_line_break()) + prefix = self.reader.prefix(3) + if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB: + return + if line_break != '\n': + chunks.append(line_break) + elif not breaks: + chunks.append(' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + srp = self.reader.peek + ch = srp() + if ch != '!': + raise ScannerError( + _F('while scanning an {name!s}', name=name), + start_mark, + _F("expected '!', but found {ch!r}", ch=ch), + self.reader.get_mark(), + ) + length = 1 + ch = srp(length) + if ch != ' ': + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_': + length += 1 + ch = srp(length) + if ch != '!': + self.reader.forward(length) + raise ScannerError( + _F('while scanning an {name!s}', name=name), + start_mark, + _F("expected '!', but found {ch!r}", ch=ch), + self.reader.get_mark(), + ) + length += 1 + value = self.reader.prefix(length) + self.reader.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + # Note: we do not check if URI is well-formed. + srp = self.reader.peek + chunks = [] + length = 0 + ch = srp(length) + while ( + '0' <= ch <= '9' + or 'A' <= ch <= 'Z' + or 'a' <= ch <= 'z' + or ch in "-;/?:@&=+$,_.!~*'()[]%" + or ((self.scanner_processing_version > (1, 1)) and ch == '#') + ): + if ch == '%': + chunks.append(self.reader.prefix(length)) + self.reader.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = srp(length) + if length != 0: + chunks.append(self.reader.prefix(length)) + self.reader.forward(length) + length = 0 + if not chunks: + raise ScannerError( + _F('while parsing an {name!s}', name=name), + start_mark, + _F('expected URI, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + return "".join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. 
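+        # e.g. '%20' in a tag URI decodes to a single space; consecutive
+        # %XX escapes are collected and decoded together as UTF-8.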
+ srp = self.reader.peek + srf = self.reader.forward + code_bytes = [] # type: List[Any] + mark = self.reader.get_mark() + while srp() == '%': + srf() + for k in range(2): + if srp(k) not in '0123456789ABCDEFabcdef': + raise ScannerError( + _F('while scanning an {name!s}', name=name), + start_mark, + _F( + 'expected URI escape sequence of 2 hexdecimal numbers,' + ' but found {srp_call!r}', + srp_call=srp(k), + ), + self.reader.get_mark(), + ) + code_bytes.append(int(self.reader.prefix(2), 16)) + srf(2) + try: + value = bytes(code_bytes).decode('utf-8') + except UnicodeDecodeError as exc: + raise ScannerError( + _F('while scanning an {name!s}', name=name), start_mark, str(exc), mark + ) + return value + + def scan_line_break(self): + # type: () -> Any + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.reader.peek() + if ch in '\r\n\x85': + if self.reader.prefix(2) == '\r\n': + self.reader.forward(2) + else: + self.reader.forward() + return '\n' + elif ch in '\u2028\u2029': + self.reader.forward() + return ch + return "" + + +class RoundTripScanner(Scanner): + def check_token(self, *choices): + # type: (Any) -> bool + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + self._gather_comments() + if len(self.tokens) > 0: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # type: () -> Any + # Return the next token, but do not delete if from the queue. + while self.need_more_tokens(): + self.fetch_more_tokens() + self._gather_comments() + if len(self.tokens) > 0: + return self.tokens[0] + return None + + def _gather_comments(self): + # type: () -> Any + """combine multiple comment lines and assign to next non-comment-token""" + comments = [] # type: List[Any] + if not self.tokens: + return comments + if isinstance(self.tokens[0], CommentToken): + comment = self.tokens.pop(0) + self.tokens_taken += 1 + comments.append(comment) + while self.need_more_tokens(): + self.fetch_more_tokens() + if not self.tokens: + return comments + if isinstance(self.tokens[0], CommentToken): + self.tokens_taken += 1 + comment = self.tokens.pop(0) + # nprint('dropping2', comment) + comments.append(comment) + if len(comments) >= 1: + self.tokens[0].add_pre_comments(comments) + # pull in post comment on e.g. ':' + if not self.done and len(self.tokens) < 2: + self.fetch_more_tokens() + + def get_token(self): + # type: () -> Any + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + self._gather_comments() + if len(self.tokens) > 0: + # nprint('tk', self.tokens) + # only add post comment to single line tokens: + # scalar, value token. 
FlowXEndToken, otherwise + # hidden streamtokens could get them (leave them and they will be + # pre comments for the next map/seq + if ( + len(self.tokens) > 1 + and isinstance( + self.tokens[0], + (ScalarToken, ValueToken, FlowSequenceEndToken, FlowMappingEndToken), + ) + and isinstance(self.tokens[1], CommentToken) + and self.tokens[0].end_mark.line == self.tokens[1].start_mark.line + ): + self.tokens_taken += 1 + c = self.tokens.pop(1) + self.fetch_more_tokens() + while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken): + self.tokens_taken += 1 + c1 = self.tokens.pop(1) + c.value = c.value + (' ' * c1.start_mark.column) + c1.value + self.fetch_more_tokens() + self.tokens[0].add_post_comment(c) + elif ( + len(self.tokens) > 1 + and isinstance(self.tokens[0], ScalarToken) + and isinstance(self.tokens[1], CommentToken) + and self.tokens[0].end_mark.line != self.tokens[1].start_mark.line + ): + self.tokens_taken += 1 + c = self.tokens.pop(1) + c.value = ( + '\n' * (c.start_mark.line - self.tokens[0].end_mark.line) + + (' ' * c.start_mark.column) + + c.value + ) + self.tokens[0].add_post_comment(c) + self.fetch_more_tokens() + while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken): + self.tokens_taken += 1 + c1 = self.tokens.pop(1) + c.value = c.value + (' ' * c1.start_mark.column) + c1.value + self.fetch_more_tokens() + self.tokens_taken += 1 + return self.tokens.pop(0) + return None + + def fetch_comment(self, comment): + # type: (Any) -> None + value, start_mark, end_mark = comment + while value and value[-1] == ' ': + # empty line within indented key context + # no need to update end-mark, that is not used + value = value[:-1] + self.tokens.append(CommentToken(value, start_mark, end_mark)) + + # scanner + + def scan_to_next_token(self): + # type: () -> Any + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. + # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. 
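+        #
+        # Unlike the base Scanner, this round-trip variant returns any
+        # comment it finds as a (value, start_mark, end_mark) tuple;
+        # fetch_comment() then turns that into a CommentToken.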
+ + srp = self.reader.peek + srf = self.reader.forward + if self.reader.index == 0 and srp() == '\uFEFF': + srf() + found = False + while not found: + while srp() == ' ': + srf() + ch = srp() + if ch == '#': + start_mark = self.reader.get_mark() + comment = ch + srf() + while ch not in _THE_END: + ch = srp() + if ch == '\0': # don't gobble the end-of-stream character + # but add an explicit newline as "YAML processors should terminate + # the stream with an explicit line break + # https://yaml.org/spec/1.2/spec.html#id2780069 + comment += '\n' + break + comment += ch + srf() + # gather any blank lines following the comment too + ch = self.scan_line_break() + while len(ch) > 0: + comment += ch + ch = self.scan_line_break() + end_mark = self.reader.get_mark() + if not self.flow_level: + self.allow_simple_key = True + return comment, start_mark, end_mark + if self.scan_line_break() != '': + start_mark = self.reader.get_mark() + if not self.flow_level: + self.allow_simple_key = True + ch = srp() + if ch == '\n': # empty toplevel lines + start_mark = self.reader.get_mark() + comment = "" + while ch: + ch = self.scan_line_break(empty_line=True) + comment += ch + if srp() == '#': + # empty line followed by indented real comment + comment = comment.rsplit('\n', 1)[0] + '\n' + end_mark = self.reader.get_mark() + return comment, start_mark, end_mark + else: + found = True + return None + + def scan_line_break(self, empty_line=False): + # type: (bool) -> Text + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.reader.peek() # type: Text + if ch in '\r\n\x85': + if self.reader.prefix(2) == '\r\n': + self.reader.forward(2) + else: + self.reader.forward() + return '\n' + elif ch in '\u2028\u2029': + self.reader.forward() + return ch + elif empty_line and ch in '\t ': + self.reader.forward() + return ch + return "" + + def scan_block_scalar(self, style, rt=True): + # type: (Any, Optional[bool]) -> Any + return Scanner.scan_block_scalar(self, style, rt=rt) + + +# commenthandling 2021, differentiatiation not needed + +VALUECMNT = 0 +KEYCMNT = 0 # 1 +# TAGCMNT = 2 +# ANCHORCMNT = 3 + + +class CommentBase: + __slots__ = ('value', 'line', 'column', 'used', 'function', 'fline', 'ufun', 'uline') + + def __init__(self, value, line, column): + # type: (Any, Any, Any) -> None + self.value = value + self.line = line + self.column = column + self.used = ' ' + info = inspect.getframeinfo(inspect.stack()[3][0]) + self.function = info.function + self.fline = info.lineno + self.ufun = None + self.uline = None + + def set_used(self, v='+'): + # type: (Any) -> None + self.used = v + info = inspect.getframeinfo(inspect.stack()[1][0]) + self.ufun = info.function # type: ignore + self.uline = info.lineno # type: ignore + + def set_assigned(self): + # type: () -> None + self.used = '|' + + def __str__(self): + # type: () -> str + return _F('{value}', value=self.value) # type: ignore + + def __repr__(self): + # type: () -> str + return _F('{value!r}', value=self.value) # type: ignore + + def info(self): + # type: () -> str + return _F( # type: ignore + '{name}{used} {line:2}:{column:<2} "{value:40s} {function}:{fline} {ufun}:{uline}', + name=self.name, # type: ignore + line=self.line, + column=self.column, + value=self.value + '"', + used=self.used, + function=self.function, + fline=self.fline, + ufun=self.ufun, + uline=self.uline, + ) + + +class EOLComment(CommentBase): + name = 'EOLC' + + def __init__(self, value, 
line, column): + # type: (Any, Any, Any) -> None + super().__init__(value, line, column) + + +class FullLineComment(CommentBase): + name = 'FULL' + + def __init__(self, value, line, column): + # type: (Any, Any, Any) -> None + super().__init__(value, line, column) + + +class BlankLineComment(CommentBase): + name = 'BLNK' + + def __init__(self, value, line, column): + # type: (Any, Any, Any) -> None + super().__init__(value, line, column) + + +class ScannedComments: + def __init__(self): + # type: (Any) -> None + self.comments = {} # type: ignore + self.unused = [] # type: ignore + + def add_eol_comment(self, comment, column, line): + # type: (Any, Any, Any) -> Any + # info = inspect.getframeinfo(inspect.stack()[1][0]) + if comment.count('\n') == 1: + assert comment[-1] == '\n' + else: + assert '\n' not in comment + self.comments[line] = retval = EOLComment(comment[:-1], line, column) + self.unused.append(line) + return retval + + def add_blank_line(self, comment, column, line): + # type: (Any, Any, Any) -> Any + # info = inspect.getframeinfo(inspect.stack()[1][0]) + assert comment.count('\n') == 1 and comment[-1] == '\n' + assert line not in self.comments + self.comments[line] = retval = BlankLineComment(comment[:-1], line, column) + self.unused.append(line) + return retval + + def add_full_line_comment(self, comment, column, line): + # type: (Any, Any, Any) -> Any + # info = inspect.getframeinfo(inspect.stack()[1][0]) + assert comment.count('\n') == 1 and comment[-1] == '\n' + # if comment.startswith('# C12'): + # raise + # this raises in line 2127 fro 330 + self.comments[line] = retval = FullLineComment(comment[:-1], line, column) + self.unused.append(line) + return retval + + def __getitem__(self, idx): + # type: (Any) -> Any + return self.comments[idx] + + def __str__(self): + # type: () -> Any + return ( + 'ParsedComments:\n ' + + '\n '.join( + ( + _F('{lineno:2} {x}', lineno=lineno, x=x.info()) + for lineno, x in self.comments.items() + ) + ) + + '\n' + ) + + def last(self): + # type: () -> str + lineno, x = list(self.comments.items())[-1] + return _F('{lineno:2} {x}\n', lineno=lineno, x=x.info()) # type: ignore + + def any_unprocessed(self): + # type: () -> bool + # ToDo: might want to differentiate based on lineno + return len(self.unused) > 0 + # for lno, comment in reversed(self.comments.items()): + # if comment.used == ' ': + # return True + # return False + + def unprocessed(self, use=False): + # type: (Any) -> Any + while len(self.unused) > 0: + first = self.unused.pop(0) if use else self.unused[0] + info = inspect.getframeinfo(inspect.stack()[1][0]) + xprintf('using', first, self.comments[first].value, info.function, info.lineno) + yield first, self.comments[first] + if use: + self.comments[first].set_used() + + def assign_pre(self, token): + # type: (Any) -> Any + token_line = token.start_mark.line + info = inspect.getframeinfo(inspect.stack()[1][0]) + xprintf('assign_pre', token_line, self.unused, info.function, info.lineno) + gobbled = False + while self.unused and self.unused[0] < token_line: + gobbled = True + first = self.unused.pop(0) + xprintf('assign_pre < ', first) + self.comments[first].set_used() + token.add_comment_pre(first) + return gobbled + + def assign_eol(self, tokens): + # type: (Any) -> Any + try: + comment_line = self.unused[0] + except IndexError: + return + if not isinstance(self.comments[comment_line], EOLComment): + return + idx = 1 + while tokens[-idx].start_mark.line > comment_line or isinstance( + tokens[-idx], ValueToken + ): + idx += 1 + 
xprintf('idx1', idx) + if ( + len(tokens) > idx + and isinstance(tokens[-idx], ScalarToken) + and isinstance(tokens[-(idx + 1)], ScalarToken) + ): + return + try: + if isinstance(tokens[-idx], ScalarToken) and isinstance( + tokens[-(idx + 1)], KeyToken + ): + try: + eol_idx = self.unused.pop(0) + self.comments[eol_idx].set_used() + xprintf('>>>>>a', idx, eol_idx, KEYCMNT) + tokens[-idx].add_comment_eol(eol_idx, KEYCMNT) + except IndexError: + raise NotImplementedError + return + except IndexError: + xprintf('IndexError1') + pass + try: + if isinstance(tokens[-idx], ScalarToken) and isinstance( + tokens[-(idx + 1)], (ValueToken, BlockEntryToken) + ): + try: + eol_idx = self.unused.pop(0) + self.comments[eol_idx].set_used() + tokens[-idx].add_comment_eol(eol_idx, VALUECMNT) + except IndexError: + raise NotImplementedError + return + except IndexError: + xprintf('IndexError2') + pass + for t in tokens: + xprintf('tt-', t) + xprintf('not implemented EOL', type(tokens[-idx])) + import sys + + sys.exit(0) + + def assign_post(self, token): + # type: (Any) -> Any + token_line = token.start_mark.line + info = inspect.getframeinfo(inspect.stack()[1][0]) + xprintf('assign_post', token_line, self.unused, info.function, info.lineno) + gobbled = False + while self.unused and self.unused[0] < token_line: + gobbled = True + first = self.unused.pop(0) + xprintf('assign_post < ', first) + self.comments[first].set_used() + token.add_comment_post(first) + return gobbled + + def str_unprocessed(self): + # type: () -> Any + return ''.join( + ( + _F(' {ind:2} {x}\n', ind=ind, x=x.info()) + for ind, x in self.comments.items() + if x.used == ' ' + ) + ) + + +class RoundTripScannerSC(Scanner): # RoundTripScanner Split Comments + def __init__(self, *arg, **kw): + # type: (Any, Any) -> None + super().__init__(*arg, **kw) + assert self.loader is not None + # comments isinitialised on .need_more_tokens and persist on + # self.loader.parsed_comments + self.comments = None + + def get_token(self): + # type: () -> Any + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if len(self.tokens) > 0: + if isinstance(self.tokens[0], BlockEndToken): + self.comments.assign_post(self.tokens[0]) # type: ignore + else: + self.comments.assign_pre(self.tokens[0]) # type: ignore + self.tokens_taken += 1 + return self.tokens.pop(0) + + def need_more_tokens(self): + # type: () -> bool + if self.comments is None: + self.loader.parsed_comments = self.comments = ScannedComments() # type: ignore + if self.done: + return False + if len(self.tokens) == 0: + return True + # The current token may be a potential simple key, so we + # need to look further. 
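+        # A "simple key" is a plain key with no explicit '?' indicator; the
+        # scanner only learns that such a scalar was a key once the ':'
+        # arrives, so it keeps buffering tokens (and leaves comments
+        # unassigned) until that possibility is resolved below.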
+ self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + if len(self.tokens) < 2: + return True + if self.tokens[0].start_mark.line == self.tokens[-1].start_mark.line: + return True + if True: + xprintf('-x--', len(self.tokens)) + for t in self.tokens: + xprintf(t) + # xprintf(self.comments.last()) + xprintf(self.comments.str_unprocessed()) # type: ignore + self.comments.assign_pre(self.tokens[0]) # type: ignore + self.comments.assign_eol(self.tokens) # type: ignore + return False + + def scan_to_next_token(self): + # type: () -> None + srp = self.reader.peek + srf = self.reader.forward + if self.reader.index == 0 and srp() == '\uFEFF': + srf() + start_mark = self.reader.get_mark() + # xprintf('current_mark', start_mark.line, start_mark.column) + found = False + while not found: + while srp() == ' ': + srf() + ch = srp() + if ch == '#': + comment_start_mark = self.reader.get_mark() + comment = ch + srf() # skipt the '#' + while ch not in _THE_END: + ch = srp() + if ch == '\0': # don't gobble the end-of-stream character + # but add an explicit newline as "YAML processors should terminate + # the stream with an explicit line break + # https://yaml.org/spec/1.2/spec.html#id2780069 + comment += '\n' + break + comment += ch + srf() + # we have a comment + if start_mark.column == 0: + self.comments.add_full_line_comment( # type: ignore + comment, comment_start_mark.column, comment_start_mark.line + ) + else: + self.comments.add_eol_comment( # type: ignore + comment, comment_start_mark.column, comment_start_mark.line + ) + comment = "" + # gather any blank lines or full line comments following the comment as well + self.scan_empty_or_full_line_comments() + if not self.flow_level: + self.allow_simple_key = True + return + if bool(self.scan_line_break()): + # start_mark = self.reader.get_mark() + if not self.flow_level: + self.allow_simple_key = True + self.scan_empty_or_full_line_comments() + return None + ch = srp() + if ch == '\n': # empty toplevel lines + start_mark = self.reader.get_mark() + comment = "" + while ch: + ch = self.scan_line_break(empty_line=True) + comment += ch + if srp() == '#': + # empty line followed by indented real comment + comment = comment.rsplit('\n', 1)[0] + '\n' + _ = self.reader.get_mark() # gobble end_mark + return None + else: + found = True + return None + + def scan_empty_or_full_line_comments(self): + # type: () -> None + blmark = self.reader.get_mark() + assert blmark.column == 0 + blanks = "" + comment = None + mark = None + ch = self.reader.peek() + while True: + # nprint('ch', repr(ch), self.reader.get_mark().column) + if ch in '\r\n\x85\u2028\u2029': + if self.reader.prefix(2) == '\r\n': + self.reader.forward(2) + else: + self.reader.forward() + if comment is not None: + comment += '\n' + self.comments.add_full_line_comment(comment, mark.column, mark.line) + comment = None + else: + blanks += '\n' + self.comments.add_blank_line(blanks, blmark.column, blmark.line) # type: ignore # NOQA + blanks = "" + blmark = self.reader.get_mark() + ch = self.reader.peek() + continue + if comment is None: + if ch in ' \t': + blanks += ch + elif ch == '#': + mark = self.reader.get_mark() + comment = '#' + else: + # xprintf('breaking on', repr(ch)) + break + else: + comment += ch + self.reader.forward() + ch = self.reader.peek() + + def scan_block_scalar_ignored_line(self, start_mark): + # type: (Any) -> Any + # See the specification for details. 
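+        # Consumes whatever trails the block scalar indicator on the same
+        # line, e.g. the spaces and '# note' in
+        #
+        #     key: |  # note
+        #       body
+        #
+        # Only spaces, one comment and the line break may appear there;
+        # the comment, when present, is recorded as an EOL comment below
+        # instead of being thrown away.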
+ srp = self.reader.peek + srf = self.reader.forward + prefix = '' + comment = None + while srp() == ' ': + prefix += srp() + srf() + if srp() == '#': + comment = '' + mark = self.reader.get_mark() + while srp() not in _THE_END: + comment += srp() + srf() + comment += '\n' # type: ignore + ch = srp() + if ch not in _THE_END: + raise ScannerError( + 'while scanning a block scalar', + start_mark, + _F('expected a comment or a line break, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + if comment is not None: + self.comments.add_eol_comment(comment, mark.column, mark.line) # type: ignore + self.scan_line_break() + return None diff --git a/pipenv/vendor/ruamel/yaml/serializer.py b/pipenv/vendor/ruamel/yaml/serializer.py new file mode 100644 index 0000000000..904ca34d65 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/serializer.py @@ -0,0 +1,241 @@ +# coding: utf-8 + +from pipenv.vendor.ruamel.yaml.error import YAMLError +from pipenv.vendor.ruamel.yaml.compat import nprint, DBG_NODE, dbg, nprintf # NOQA +from pipenv.vendor.ruamel.yaml.util import RegExp + +from pipenv.vendor.ruamel.yaml.events import ( + StreamStartEvent, + StreamEndEvent, + MappingStartEvent, + MappingEndEvent, + SequenceStartEvent, + SequenceEndEvent, + AliasEvent, + ScalarEvent, + DocumentStartEvent, + DocumentEndEvent, +) +from pipenv.vendor.ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode + +if False: # MYPY + from typing import Any, Dict, Union, Text, Optional # NOQA + from pipenv.vendor.ruamel.yaml.compat import VersionType # NOQA + +__all__ = ['Serializer', 'SerializerError'] + + +class SerializerError(YAMLError): + pass + + +class Serializer: + + # 'id' and 3+ numbers, but not 000 + ANCHOR_TEMPLATE = 'id%03d' + ANCHOR_RE = RegExp('id(?!000$)\\d{3,}') + + def __init__( + self, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + dumper=None, + ): + # type: (Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> None # NOQA + self.dumper = dumper + if self.dumper is not None: + self.dumper._serializer = self + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + if isinstance(version, str): + self.use_version = tuple(map(int, version.split('.'))) + else: + self.use_version = version # type: ignore + self.use_tags = tags + self.serialized_nodes = {} # type: Dict[Any, Any] + self.anchors = {} # type: Dict[Any, Any] + self.last_anchor_id = 0 + self.closed = None # type: Optional[bool] + self._templated_id = None + + @property + def emitter(self): + # type: () -> Any + if hasattr(self.dumper, 'typ'): + return self.dumper.emitter + return self.dumper._emitter + + @property + def resolver(self): + # type: () -> Any + if hasattr(self.dumper, 'typ'): + self.dumper.resolver + return self.dumper._resolver + + def open(self): + # type: () -> None + if self.closed is None: + self.emitter.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError('serializer is closed') + else: + raise SerializerError('serializer is already opened') + + def close(self): + # type: () -> None + if self.closed is None: + raise SerializerError('serializer is not opened') + elif not self.closed: + self.emitter.emit(StreamEndEvent()) + self.closed = True + + # def __del__(self): + # self.close() + + def serialize(self, node): + # type: (Any) -> None + if dbg(DBG_NODE): + nprint('Serializing nodes') + node.dump() + if self.closed is None: + raise 
SerializerError('serializer is not opened') + elif self.closed: + raise SerializerError('serializer is closed') + self.emitter.emit( + DocumentStartEvent( + explicit=self.use_explicit_start, version=self.use_version, tags=self.use_tags + ) + ) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emitter.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + # type: (Any) -> None + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + anchor = None + try: + if node.anchor.always_dump: + anchor = node.anchor.value + except: # NOQA + pass + self.anchors[node] = anchor + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + # type: (Any) -> Any + try: + anchor = node.anchor.value + except: # NOQA + anchor = None + if anchor is None: + self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + return anchor + + def serialize_node(self, node, parent, index): + # type: (Any, Any, Any) -> None + alias = self.anchors[node] + if node in self.serialized_nodes: + node_style = getattr(node, 'style', None) + if node_style != '?': + node_style = None + self.emitter.emit(AliasEvent(alias, style=node_style)) + else: + self.serialized_nodes[node] = True + self.resolver.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + # here check if the node.tag equals the one that would result from parsing + # if not equal quoting is necessary for strings + detected_tag = self.resolver.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolver.resolve(ScalarNode, node.value, (False, True)) + implicit = ( + (node.tag == detected_tag), + (node.tag == default_tag), + node.tag.startswith('tag:yaml.org,2002:'), + ) + self.emitter.emit( + ScalarEvent( + alias, + node.tag, + implicit, + node.value, + style=node.style, + comment=node.comment, + ) + ) + elif isinstance(node, SequenceNode): + implicit = node.tag == self.resolver.resolve(SequenceNode, node.value, True) + comment = node.comment + end_comment = None + seq_comment = None + if node.flow_style is True: + if comment: # eol comment on flow style sequence + seq_comment = comment[0] + # comment[0] = None + if comment and len(comment) > 2: + end_comment = comment[2] + else: + end_comment = None + self.emitter.emit( + SequenceStartEvent( + alias, + node.tag, + implicit, + flow_style=node.flow_style, + comment=node.comment, + ) + ) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emitter.emit(SequenceEndEvent(comment=[seq_comment, end_comment])) + elif isinstance(node, MappingNode): + implicit = node.tag == self.resolver.resolve(MappingNode, node.value, True) + comment = node.comment + end_comment = None + map_comment = None + if node.flow_style is True: + if comment: # eol comment on flow style sequence + map_comment = comment[0] + # comment[0] = None + if comment and len(comment) > 2: + end_comment = comment[2] + self.emitter.emit( + MappingStartEvent( + alias, + node.tag, + implicit, + flow_style=node.flow_style, + comment=node.comment, + nr_items=len(node.value), + ) + ) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + 
self.emitter.emit(MappingEndEvent(comment=[map_comment, end_comment])) + self.resolver.ascend_resolver() + + +def templated_id(s): + # type: (Text) -> Any + return Serializer.ANCHOR_RE.match(s) diff --git a/pipenv/vendor/ruamel/yaml/timestamp.py b/pipenv/vendor/ruamel/yaml/timestamp.py new file mode 100644 index 0000000000..58eef04dc8 --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/timestamp.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +import datetime +import copy + +# ToDo: at least on PY3 you could probably attach the tzinfo correctly to the object +# a more complete datetime might be used by safe loading as well + +if False: # MYPY + from typing import Any, Dict, Optional, List # NOQA + + +class TimeStamp(datetime.datetime): + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + self._yaml = dict(t=False, tz=None, delta=0) # type: Dict[Any, Any] + + def __new__(cls, *args, **kw): # datetime is immutable + # type: (Any, Any) -> Any + return datetime.datetime.__new__(cls, *args, **kw) + + def __deepcopy__(self, memo): + # type: (Any) -> Any + ts = TimeStamp(self.year, self.month, self.day, self.hour, self.minute, self.second) + ts._yaml = copy.deepcopy(self._yaml) + return ts + + def replace( + self, + year=None, + month=None, + day=None, + hour=None, + minute=None, + second=None, + microsecond=None, + tzinfo=True, + fold=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any, Any, Any) -> Any + if year is None: + year = self.year + if month is None: + month = self.month + if day is None: + day = self.day + if hour is None: + hour = self.hour + if minute is None: + minute = self.minute + if second is None: + second = self.second + if microsecond is None: + microsecond = self.microsecond + if tzinfo is True: + tzinfo = self.tzinfo + if fold is None: + fold = self.fold + ts = type(self)(year, month, day, hour, minute, second, microsecond, tzinfo, fold=fold) + ts._yaml = copy.deepcopy(self._yaml) + return ts diff --git a/pipenv/vendor/ruamel/yaml/tokens.py b/pipenv/vendor/ruamel/yaml/tokens.py new file mode 100644 index 0000000000..1d2a0ca4bc --- /dev/null +++ b/pipenv/vendor/ruamel/yaml/tokens.py @@ -0,0 +1,404 @@ +# coding: utf-8 + +from pipenv.vendor.ruamel.yaml.compat import _F, nprintf # NOQA + +if False: # MYPY + from typing import Text, Any, Dict, Optional, List # NOQA + from .error import StreamMark # NOQA + +SHOW_LINES = True + + +class Token: + __slots__ = 'start_mark', 'end_mark', '_comment' + + def __init__(self, start_mark, end_mark): + # type: (StreamMark, StreamMark) -> None + self.start_mark = start_mark + self.end_mark = end_mark + + def __repr__(self): + # type: () -> Any + # attributes = [key for key in self.__slots__ if not key.endswith('_mark') and + # hasattr('self', key)] + attributes = [key for key in self.__slots__ if not key.endswith('_mark')] + attributes.sort() + # arguments = ', '.join( + # [_F('{key!s}={gattr!r})', key=key, gattr=getattr(self, key)) for key in attributes] + # ) + arguments = [ + _F('{key!s}={gattr!r}', key=key, gattr=getattr(self, key)) for key in attributes + ] + if SHOW_LINES: + try: + arguments.append('line: ' + str(self.start_mark.line)) + except: # NOQA + pass + try: + arguments.append('comment: ' + str(self._comment)) + except: # NOQA + pass + return '{}({})'.format(self.__class__.__name__, ', '.join(arguments)) + + @property + def column(self): + # type: () -> int + return self.start_mark.column + + @column.setter + def column(self, pos): + # type: (Any) -> None + self.start_mark.column = pos + + # old style ( <= 0.17) is a TWO 
element list with first being the EOL + # comment concatenated with following FLC/BLNK; and second being a list of FLC/BLNK + # preceding the token + # new style ( >= 0.17 ) is a THREE element list with the first being a list of + # preceding FLC/BLNK, the second EOL and the third following FLC/BLNK + # note that new style has differing order, and does not consist of CommentToken(s) + # but of CommentInfo instances + # any non-assigned values in new style are None, but first and last can be empty list + # new style routines add one comment at a time + + # going to be deprecated in favour of add_comment_eol/post + def add_post_comment(self, comment): + # type: (Any) -> None + if not hasattr(self, '_comment'): + self._comment = [None, None] + else: + assert len(self._comment) in [2, 5] # make sure it is version 0 + # if isinstance(comment, CommentToken): + # if comment.value.startswith('# C09'): + # raise + self._comment[0] = comment + + # going to be deprecated in favour of add_comment_pre + def add_pre_comments(self, comments): + # type: (Any) -> None + if not hasattr(self, '_comment'): + self._comment = [None, None] + else: + assert len(self._comment) == 2 # make sure it is version 0 + assert self._comment[1] is None + self._comment[1] = comments + return + + # new style + def add_comment_pre(self, comment): + # type: (Any) -> None + if not hasattr(self, '_comment'): + self._comment = [[], None, None] # type: ignore + else: + assert len(self._comment) == 3 + if self._comment[0] is None: + self._comment[0] = [] # type: ignore + self._comment[0].append(comment) # type: ignore + + def add_comment_eol(self, comment, comment_type): + # type: (Any, Any) -> None + if not hasattr(self, '_comment'): + self._comment = [None, None, None] + else: + assert len(self._comment) == 3 + assert self._comment[1] is None + if self.comment[1] is None: + self._comment[1] = [] # type: ignore + self._comment[1].extend([None] * (comment_type + 1 - len(self.comment[1]))) # type: ignore # NOQA + # nprintf('commy', self.comment, comment_type) + self._comment[1][comment_type] = comment # type: ignore + + def add_comment_post(self, comment): + # type: (Any) -> None + if not hasattr(self, '_comment'): + self._comment = [None, None, []] # type: ignore + else: + assert len(self._comment) == 3 + if self._comment[2] is None: + self._comment[2] = [] # type: ignore + self._comment[2].append(comment) # type: ignore + + # def get_comment(self): + # # type: () -> Any + # return getattr(self, '_comment', None) + + @property + def comment(self): + # type: () -> Any + return getattr(self, '_comment', None) + + def move_old_comment(self, target, empty=False): + # type: (Any, bool) -> Any + """move a comment from this token to target (normally next token) + used to combine e.g. 
comments before a BlockEntryToken to the
+        ScalarToken that follows it
+        empty is a special for empty values -> comment after key
+        """
+        c = self.comment
+        if c is None:
+            return
+        # don't push beyond last element
+        if isinstance(target, (StreamEndToken, DocumentStartToken)):
+            return
+        delattr(self, '_comment')
+        tc = target.comment
+        if not tc:  # target comment, just insert
+            # special for empty value in key: value issue 25
+            if empty:
+                c = [c[0], c[1], None, None, c[0]]
+            target._comment = c
+            # nprint('mco2:', self, target, target.comment, empty)
+            return self
+        if c[0] and tc[0] or c[1] and tc[1]:
+            raise NotImplementedError(_F('overlap in comment {c!r} {tc!r}', c=c, tc=tc))
+        if c[0]:
+            tc[0] = c[0]
+        if c[1]:
+            tc[1] = c[1]
+        return self
+
+    def split_old_comment(self):
+        # type: () -> Any
+        """ split the post part of a comment, and return it
+        as comment to be added. Delete second part if [None, None]
+         abc:  # this goes to sequence
+           # this goes to first element
+         - first element
+        """
+        comment = self.comment
+        if comment is None or comment[0] is None:
+            return None  # nothing to do
+        ret_val = [comment[0], None]
+        if comment[1] is None:
+            delattr(self, '_comment')
+        return ret_val
+
+    def move_new_comment(self, target, empty=False):
+        # type: (Any, bool) -> Any
+        """move a comment from this token to target (normally next token)
+        used to combine e.g. comments before a BlockEntryToken to the
+        ScalarToken that follows it
+        empty is a special for empty values -> comment after key
+        """
+        c = self.comment
+        if c is None:
+            return
+        # don't push beyond last element
+        if isinstance(target, (StreamEndToken, DocumentStartToken)):
+            return
+        delattr(self, '_comment')
+        tc = target.comment
+        if not tc:  # target comment, just insert
+            # special for empty value in key: value issue 25
+            if empty:
+                c = [c[0], c[1], c[2]]
+            target._comment = c
+            # nprint('mco2:', self, target, target.comment, empty)
+            return self
+        # if self and target have both pre, eol or post comments, something seems wrong
+        for idx in range(3):
+            if c[idx] is not None and tc[idx] is not None:
+                raise NotImplementedError(_F('overlap in comment {c!r} {tc!r}', c=c, tc=tc))
+        # move the comment parts
+        for idx in range(3):
+            if c[idx]:
+                tc[idx] = c[idx]
+        return self
+
+
+# class BOMToken(Token):
+#     id = '<byte order mark>'
+
+
+class DirectiveToken(Token):
+    __slots__ = 'name', 'value'
+    id = '<directive>'
+
+    def __init__(self, name, value, start_mark, end_mark):
+        # type: (Any, Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.name = name
+        self.value = value
+
+
+class DocumentStartToken(Token):
+    __slots__ = ()
+    id = '<document start>'
+
+
+class DocumentEndToken(Token):
+    __slots__ = ()
+    id = '<document end>'
+
+
+class StreamStartToken(Token):
+    __slots__ = ('encoding',)
+    id = '<stream start>'
+
+    def __init__(self, start_mark=None, end_mark=None, encoding=None):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.encoding = encoding
+
+
+class StreamEndToken(Token):
+    __slots__ = ()
+    id = '<stream end>'
+
+
+class BlockSequenceStartToken(Token):
+    __slots__ = ()
+    id = '<block sequence start>'
+
+
+class BlockMappingStartToken(Token):
+    __slots__ = ()
+    id = '<block mapping start>'
+
+
+class BlockEndToken(Token):
+    __slots__ = ()
+    id = '<block end>'
+
+
+class FlowSequenceStartToken(Token):
+    __slots__ = ()
+    id = '['
+
+
+class FlowMappingStartToken(Token):
+    __slots__ = ()
+    id = '{'
+
+
+class FlowSequenceEndToken(Token):
+    __slots__ = ()
+    id = ']'
+
+
+class FlowMappingEndToken(Token):
+    __slots__ = ()
+    id = '}'
+
+
+class KeyToken(Token):
+    __slots__ = ()
+    id = '?'
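+
+    # The short `id` strings double as the token names the parser quotes in
+    # its error messages (e.g. "expected <block end>, but found '?'").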
+
+    # def x__repr__(self):
+    #     return 'KeyToken({})'.format(
+    #         self.start_mark.buffer[self.start_mark.index:].split(None, 1)[0])
+
+
+class ValueToken(Token):
+    __slots__ = ()
+    id = ':'
+
+
+class BlockEntryToken(Token):
+    __slots__ = ()
+    id = '-'
+
+
+class FlowEntryToken(Token):
+    __slots__ = ()
+    id = ','
+
+
+class AliasToken(Token):
+    __slots__ = ('value',)
+    id = '<alias>'
+
+    def __init__(self, value, start_mark, end_mark):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+
+class AnchorToken(Token):
+    __slots__ = ('value',)
+    id = '<anchor>'
+
+    def __init__(self, value, start_mark, end_mark):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+
+class TagToken(Token):
+    __slots__ = ('value',)
+    id = '<tag>'
+
+    def __init__(self, value, start_mark, end_mark):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+
+class ScalarToken(Token):
+    __slots__ = 'value', 'plain', 'style'
+    id = '<scalar>'
+
+    def __init__(self, value, plain, start_mark, end_mark, style=None):
+        # type: (Any, Any, Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+        self.plain = plain
+        self.style = style
+
+
+class CommentToken(Token):
+    __slots__ = '_value', 'pre_done'
+    id = '<comment>'
+
+    def __init__(self, value, start_mark=None, end_mark=None, column=None):
+        # type: (Any, Any, Any, Any) -> None
+        if start_mark is None:
+            assert column is not None
+            self._column = column
+        Token.__init__(self, start_mark, None)  # type: ignore
+        self._value = value
+
+    @property
+    def value(self):
+        # type: () -> str
+        if isinstance(self._value, str):
+            return self._value
+        return "".join(self._value)
+
+    @value.setter
+    def value(self, val):
+        # type: (Any) -> None
+        self._value = val
+
+    def reset(self):
+        # type: () -> None
+        if hasattr(self, 'pre_done'):
+            delattr(self, 'pre_done')
+
+    def __repr__(self):
+        # type: () -> Any
+        v = '{!r}'.format(self.value)
+        if SHOW_LINES:
+            try:
+                v += ', line: ' + str(self.start_mark.line)
+            except:  # NOQA
+                pass
+            try:
+                v += ', col: ' + str(self.start_mark.column)
+            except:  # NOQA
+                pass
+        return 'CommentToken({})'.format(v)
+
+    def __eq__(self, other):
+        # type: (Any) -> bool
+        if self.start_mark != other.start_mark:
+            return False
+        if self.end_mark != other.end_mark:
+            return False
+        if self.value != other.value:
+            return False
+        return True
+
+    def __ne__(self, other):
+        # type: (Any) -> bool
+        return not self.__eq__(other)
diff --git a/pipenv/vendor/ruamel/yaml/util.py b/pipenv/vendor/ruamel/yaml/util.py
new file mode 100644
index 0000000000..9ff51bdee2
--- /dev/null
+++ b/pipenv/vendor/ruamel/yaml/util.py
@@ -0,0 +1,256 @@
+# coding: utf-8
+
+"""
+some helper functions that might be generally useful
+"""
+
+import datetime
+from functools import partial
+import re
+
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Text  # NOQA
+    from .compat import StreamTextType  # NOQA
+
+
+class LazyEval:
+    """
+    Lightweight wrapper around lazily evaluated func(*args, **kwargs).
+
+    func is only evaluated when any attribute of its return value is accessed.
+    Every attribute access is passed through to the wrapped value.
+    (This only excludes special cases like method-wrappers, e.g., __hash__.)
+    The sole additional attribute is the lazy_self function which holds the
+    return value (or, prior to evaluation, func and arguments), in its closure.
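+
+    A small usage sketch (RegExp below wraps re.compile this way):
+
+        pattern = RegExp('ab*')   # nothing is compiled yet
+        pattern.match('abbb')     # the first attribute access runs
+                                  # re.compile('ab*'), then delegates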
+    """
+
+    def __init__(self, func, *args, **kwargs):
+        # type: (Any, Any, Any) -> None
+        def lazy_self():
+            # type: () -> Any
+            return_value = func(*args, **kwargs)
+            object.__setattr__(self, 'lazy_self', lambda: return_value)
+            return return_value
+
+        object.__setattr__(self, 'lazy_self', lazy_self)
+
+    def __getattribute__(self, name):
+        # type: (Any) -> Any
+        lazy_self = object.__getattribute__(self, 'lazy_self')
+        if name == 'lazy_self':
+            return lazy_self
+        return getattr(lazy_self(), name)
+
+    def __setattr__(self, name, value):
+        # type: (Any, Any) -> None
+        setattr(self.lazy_self(), name, value)
+
+
+RegExp = partial(LazyEval, re.compile)
+
+timestamp_regexp = RegExp(
+    """^(?P<year>[0-9][0-9][0-9][0-9])
+       -(?P<month>[0-9][0-9]?)
+       -(?P<day>[0-9][0-9]?)
+       (?:((?P<t>[Tt])|[ \\t]+)  # explicitly not retaining extra spaces
+       (?P<hour>[0-9][0-9]?)
+       :(?P<minute>[0-9][0-9])
+       :(?P<second>[0-9][0-9])
+       (?:\\.(?P<fraction>[0-9]*))?
+       (?:[ \\t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+       (?::(?P<tz_minute>[0-9][0-9]))?))?)?$""",
+    re.X,
+)
+
+
+def create_timestamp(
+    year, month, day, t, hour, minute, second, fraction, tz, tz_sign, tz_hour, tz_minute
+):
+    # type: (Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any) -> Any
+    # create a timestamp from match against timestamp_regexp
+    MAX_FRAC = 999999
+    year = int(year)
+    month = int(month)
+    day = int(day)
+    if not hour:
+        return datetime.date(year, month, day)
+    hour = int(hour)
+    minute = int(minute)
+    second = int(second)
+    frac = 0
+    if fraction:
+        frac_s = fraction[:6]
+        while len(frac_s) < 6:
+            frac_s += '0'
+        frac = int(frac_s)
+        if len(fraction) > 6 and int(fraction[6]) > 4:
+            frac += 1
+        if frac > MAX_FRAC:
+            fraction = 0
+        else:
+            fraction = frac
+    else:
+        fraction = 0
+    delta = None
+    if tz_sign:
+        tz_hour = int(tz_hour)
+        tz_minute = int(tz_minute) if tz_minute else 0
+        delta = datetime.timedelta(
+            hours=tz_hour, minutes=tz_minute, seconds=1 if frac > MAX_FRAC else 0
+        )
+        if tz_sign == '-':
+            delta = -delta
+    elif frac > MAX_FRAC:
+        delta = -datetime.timedelta(seconds=1)
+    # should do something else instead (or hook this up to the preceding if statement
+    # in reverse
+    # if delta is None:
+    #     return datetime.datetime(year, month, day, hour, minute, second, fraction)
+    # return datetime.datetime(year, month, day, hour, minute, second, fraction,
+    #                          datetime.timezone.utc)
+    # the above is not good enough though, should provide tzinfo.
In Python3 that is easily + # doable drop that kind of support for Python2 as it has not native tzinfo + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + +# originally as comment +# https://github.com/pre-commit/pre-commit/pull/211#issuecomment-186466605 +# if you use this in your code, I suggest adding a test in your test suite +# that check this routines output against a known piece of your YAML +# before upgrades to this code break your round-tripped YAML +def load_yaml_guess_indent(stream, **kw): + # type: (StreamTextType, Any) -> Any + """guess the indent and block sequence indent of yaml stream/string + + returns round_trip_loaded stream, indent level, block sequence indent + - block sequence indent is the number of spaces before a dash relative to previous indent + - if there are no block sequences, indent is taken from nested mappings, block sequence + indent is unset (None) in that case + """ + from .main import YAML + + # load a YAML document, guess the indentation, if you use TABs you are on your own + def leading_spaces(line): + # type: (Any) -> int + idx = 0 + while idx < len(line) and line[idx] == ' ': + idx += 1 + return idx + + if isinstance(stream, str): + yaml_str = stream # type: Any + elif isinstance(stream, bytes): + # most likely, but the Reader checks BOM for this + yaml_str = stream.decode('utf-8') + else: + yaml_str = stream.read() + map_indent = None + indent = None # default if not found for some reason + block_seq_indent = None + prev_line_key_only = None + key_indent = 0 + for line in yaml_str.splitlines(): + rline = line.rstrip() + lline = rline.lstrip() + if lline.startswith('- '): + l_s = leading_spaces(line) + block_seq_indent = l_s - key_indent + idx = l_s + 1 + while line[idx] == ' ': # this will end as we rstripped + idx += 1 + if line[idx] == '#': # comment after - + continue + indent = idx - key_indent + break + if map_indent is None and prev_line_key_only is not None and rline: + idx = 0 + while line[idx] in ' -': + idx += 1 + if idx > prev_line_key_only: + map_indent = idx - prev_line_key_only + if rline.endswith(':'): + key_indent = leading_spaces(line) + idx = 0 + while line[idx] == ' ': # this will end on ':' + idx += 1 + prev_line_key_only = idx + continue + prev_line_key_only = None + if indent is None and map_indent is not None: + indent = map_indent + yaml = YAML() + return yaml.load(yaml_str, **kw), indent, block_seq_indent # type: ignore + + +def configobj_walker(cfg): + # type: (Any) -> Any + """ + walks over a ConfigObj (INI file with comments) generating + corresponding YAML output (including comments + """ + from configobj import ConfigObj # type: ignore + + assert isinstance(cfg, ConfigObj) + for c in cfg.initial_comment: + if c.strip(): + yield c + for s in _walk_section(cfg): + if s.strip(): + yield s + for c in cfg.final_comment: + if c.strip(): + yield c + + +def _walk_section(s, level=0): + # type: (Any, int) -> Any + from configobj import Section + + assert isinstance(s, Section) + indent = ' ' * level + for name in s.scalars: + for c in s.comments[name]: + yield indent + c.strip() + x = s[name] + if '\n' in x: + i = indent + ' ' + x = '|\n' + i + x.strip().replace('\n', '\n' + i) + elif ':' in x: + x = "'" + x.replace("'", "''") + "'" + line = '{0}{1}: {2}'.format(indent, name, x) + c = s.inline_comments[name] + if c: + line += ' ' + c + yield line + for name in s.sections: + for c in s.comments[name]: + yield indent + c.strip() + line = 
'{0}{1}:'.format(indent, name) + c = s.inline_comments[name] + if c: + line += ' ' + c + yield line + for val in _walk_section(s[name], level=level + 1): + yield val + + +# def config_obj_2_rt_yaml(cfg): +# from .comments import CommentedMap, CommentedSeq +# from configobj import ConfigObj +# assert isinstance(cfg, ConfigObj) +# #for c in cfg.initial_comment: +# # if c.strip(): +# # pass +# cm = CommentedMap() +# for name in s.sections: +# cm[name] = d = CommentedMap() +# +# +# #for c in cfg.final_comment: +# # if c.strip(): +# # yield c +# return cm diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index bb1cadfe97..3cba6133ab 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -24,6 +24,7 @@ python-dateutil==2.8.2 python-dotenv==0.19.0 pythonfinder==1.2.10 requirementslib==1.6.9 +ruamel.yaml==0.17.21 shellingham==1.4.0 six==1.16.0 termcolor==1.1.0 diff --git a/tasks/vendoring/patches/patched/safety-main.patch b/tasks/vendoring/patches/patched/safety-main.patch index 30073c496c..fd2275342b 100644 --- a/tasks/vendoring/patches/patched/safety-main.patch +++ b/tasks/vendoring/patches/patched/safety-main.patch @@ -1,54 +1,13 @@ diff --git a/pipenv/patched/safety/__main__.py b/pipenv/patched/safety/__main__.py -index d9a0bdab..f905408a 100644 +index d9a0bdab..be36e88b 100644 --- a/pipenv/patched/safety/__main__.py +++ b/pipenv/patched/safety/__main__.py -@@ -1,8 +1,48 @@ +@@ -1,7 +1,7 @@ """Allow safety to be executable through `python -m safety`.""" from __future__ import absolute_import -from .cli import cli -+import os -+import sys -+import sysconfig -+ -+ -+PATCHED_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -+PIPENV_DIR = os.path.dirname(PATCHED_DIR) -+VENDORED_DIR = os.path.join("PIPENV_DIR", "vendor") -+ -+ -+def get_site_packages(): -+ prefixes = {sys.prefix, sysconfig.get_config_var('prefix')} -+ try: -+ prefixes.add(sys.real_prefix) -+ except AttributeError: -+ pass -+ form = sysconfig.get_path('purelib', expand=False) -+ py_version_short = '{0[0]}.{0[1]}'.format(sys.version_info) -+ return { -+ form.format(base=prefix, py_version_short=py_version_short) -+ for prefix in prefixes -+ } -+ -+ -+def insert_before_site_packages(*paths): -+ site_packages = get_site_packages() -+ index = None -+ for i, path in enumerate(sys.path): -+ if path in site_packages: -+ index = i -+ break -+ if index is None: -+ sys.path += list(paths) -+ else: -+ sys.path = sys.path[:index] + list(paths) + sys.path[index:] -+ -+ -+def insert_pipenv_dirs(): -+ insert_before_site_packages(os.path.dirname(PIPENV_DIR), PATCHED_DIR, VENDORED_DIR) ++from pipenv.patched.safety.cli import cli if __name__ == "__main__": # pragma: no cover -+ insert_pipenv_dirs() -+ from safety.cli import cli - cli(prog_name="safety") From 69adebe597c01d8233efd25f4254149912de1606 Mon Sep 17 00:00:00 2001 From: Yeison Vargas Date: Wed, 14 Sep 2022 21:28:58 -0500 Subject: [PATCH 002/200] Improving pipenv check command with the new Safety options (#5355) --- pipenv/cli/command.py | 26 +++++++++++-- pipenv/core.py | 90 ++++++++++++++++++++++++++----------------- 2 files changed, 78 insertions(+), 38 deletions(-) diff --git a/pipenv/cli/command.py b/pipenv/cli/command.py index f05c3d45d0..43d38e1eba 100644 --- a/pipenv/cli/command.py +++ b/pipenv/cli/command.py @@ -474,7 +474,7 @@ def run(state, command, args): "--db", nargs=1, default=lambda: os.environ.get("PIPENV_SAFETY_DB"), - help="Path to a local PyUp Safety vulnerabilities database." 
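+    # The vendored safety 2.x accepts either a local directory holding the
+    # vulnerability database files or an HTTP(S) mirror URL here; pipenv
+    # forwards the value unchanged as safety's own --db option.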
+ help="Path or URL to a PyUp Safety vulnerabilities database." " Default: ENV PIPENV_SAFETY_DB or None.", ) @option( @@ -485,7 +485,7 @@ def run(state, command, args): ) @option( "--output", - type=Choice(["default", "json", "full-report", "bare"]), + type=Choice(["default", "json", "full-report", "bare", 'screen', 'text']), default="default", help="Translates to --json, --full-report or --bare from PyUp Safety check", ) @@ -498,6 +498,16 @@ def run(state, command, args): @option( "--quiet", is_flag=True, help="Quiet standard output, except vulnerability report." ) +@option("--policy-file", default='', + help="Define the policy file to be used") +@option("--exit-code/--continue-on-error", default=True, + help="Output standard exit codes. Default: --exit-code") +@option("--audit-and-monitor/--disable-audit-and-monitor", default=True, + help="Send results back to pyup.io for viewing on your dashboard. Requires an API key.") +@option("--project", default=None, + help="Project to associate this scan with on pyup.io. Defaults to a canonicalized github style name if available, otherwise unknown") +@option("--save-json", default="", help="Path to where output file will be placed, if the path is a directory, " + "Safety will use safety-report.json as filename. Default: empty") @common_options @system_option @pass_state @@ -506,9 +516,14 @@ def check( db=None, style=False, ignore=None, - output="default", + output="screen", key=None, quiet=False, + exit_code=True, + policy_file="", + save_json="", + audit_and_monitor=True, + project=None, **kwargs, ): """Checks for PyUp Safety security vulnerabilities and against PEP 508 markers provided in Pipfile.""" @@ -524,6 +539,11 @@ def check( output=output, key=key, quiet=quiet, + exit_code=exit_code, + policy_file=policy_file, + save_json=save_json, + audit_and_monitor=audit_and_monitor, + safety_project=project, pypi_mirror=state.pypi_mirror, ) diff --git a/pipenv/core.py b/pipenv/core.py index 9a82df6317..0c07964384 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -2657,9 +2657,14 @@ def do_check( system=False, db=None, ignore=None, - output="default", + output="screen", key=None, quiet=False, + exit_code=True, + policy_file="", + save_json="", + audit_and_monitor=True, + safety_project=None, pypi_mirror=None, ): import json @@ -2747,7 +2752,8 @@ def do_check( ignored = [["--ignore", cve] for cve in ignore] if not quiet and not project.s.is_quiet(): click.echo( - "Notice: Ignoring CVE(s) {}".format( + "Notice: Ignoring Vulnerabilit{} {}".format( + 'ies' if len(ignored) > 1 else 'y', click.style(", ".join(ignore), fg="yellow") ), err=True, @@ -2755,49 +2761,63 @@ def do_check( else: ignored = [] - switch = output - if output == "default": - switch = "json" + options = [ + "--audit-and-monitor" if audit_and_monitor else + "--disable-audit-and-monitor", + "--exit-code" if exit_code else "--continue-on-error" + ] + + if output == 'full-report': + options.append("--full-report") + elif output not in ['screen', 'default']: + options.append(f"--output={output}") + + if save_json: + options.append(f'--save-json={save_json}') + + if policy_file: + options.append(f"--policy-file={policy_file}") + + if safety_project: + options.append(f'--project={safety_project}') + + cmd = _cmd + [safety_path, "--debug", "check"] + options - cmd = _cmd + [safety_path, "check", f"--{switch}"] if db: if not quiet and not project.s.is_quiet(): - click.echo(click.style(f"Using local database {db}")) + click.echo(click.style(f"Using {db} database")) cmd.append(f"--db={db}") elif 
key or project.s.PIPENV_PYUP_API_KEY: cmd = cmd + [f"--key={key or project.s.PIPENV_PYUP_API_KEY}"] + else: + # TODO: Define the source + PIPENV_SAFETY_DB = "https://raw.githubusercontent.com/" \ + "pyupio/safety-db/master/data/" + cmd.append(f"--db={PIPENV_SAFETY_DB}") + if ignored: for cve in ignored: cmd += cve - c = run_command(cmd, catch_exceptions=False, is_verbose=project.s.is_verbose()) - if output == "default": - try: - results = simplejson.loads(c.stdout) - except (ValueError, json.JSONDecodeError): - raise exceptions.JSONParseError(c.stdout, c.stderr) - except Exception: - raise exceptions.PipenvCmdError( - cmd_list_to_shell(c.args), c.stdout, c.stderr, c.returncode - ) - for (package, resolved, installed, description, vuln, *_) in results: - click.echo( - "{}: {} {} resolved ({} installed)!".format( - click.style(vuln, bold=True), - click.style(package, fg="green"), - click.style(resolved, fg="yellow", bold=False), - click.style(installed, fg="yellow", bold=True), - ) - ) - click.echo(f"{description}") - click.echo() - if c.returncode == 0: - click.echo(click.style("All good!", fg="green")) - sys.exit(0) - else: - sys.exit(1) - else: + click.secho("Running the command", fg="red") + + safety_env = os.environ.copy() + safety_env["SAFETY_CUSTOM_INTEGRATION"] = 'True' + safety_env["SAFETY_ANNOUNCEMENTS_URL"] = 'https://foo-bar' # TODO: Define the source + safety_env["SAFETY_SOURCE"] = 'pipenv' + + c = run_command(cmd, catch_exceptions=False, + is_verbose=project.s.is_verbose(), + env=safety_env) + + if c.stdout: click.echo(c.stdout) - sys.exit(c.returncode) + elif c.stderr: + raise exceptions.PipenvCmdError( + cmd_list_to_shell(c.args), c.stdout, c.stderr, c.returncode + ) + + # Let to Safety handles the exit code behavior + sys.exit(c.returncode) def do_graph(project, bare=False, json=False, json_tree=False, reverse=False): From d24efe29e8951c34bb45c424b881a49f5c4bcf92 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Fri, 5 Aug 2022 10:45:02 -0400 Subject: [PATCH 003/200] add back release vendoring import -- unintentionally removed. --- tasks/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tasks/__init__.py b/tasks/__init__.py index fb060ad102..27e9bd519f 100644 --- a/tasks/__init__.py +++ b/tasks/__init__.py @@ -4,8 +4,9 @@ import invoke -from . import vendoring +from . 
import release, vendoring
 
 ROOT = Path(".").parent.parent.absolute()
 
-ns = invoke.Collection(vendoring)
+ns = invoke.Collection(vendoring, release, release.clean_mdchangelog)
+

From 646d518342833b5bf248a6b4ab02fa76202ce0db Mon Sep 17 00:00:00 2001
From: Matt Davis
Date: Fri, 5 Aug 2022 10:45:30 -0400
Subject: [PATCH 004/200] Release v2022.8.5

---
 CHANGELOG.rst         |   33 ++
 news/4974.feature.rst |    1 -
 news/5165.removal.rst |    1 -
 news/5188.vendor.rst  |    5 -
 news/5204.bugfix.rst  |    1 -
 news/5206.bugfix.rst  |    1 -
 news/5210.bugfix.rst  |    2 -
 news/5215.vendor.rst  |    1 -
 pipenv/__version__.py |    2 +-
 pipenv/pipenv.1       | 1171 ++++++++++++++++++++++++++++++++++-------
 10 files changed, 1019 insertions(+), 199 deletions(-)
 delete mode 100644 news/4974.feature.rst
 delete mode 100644 news/5165.removal.rst
 delete mode 100644 news/5188.vendor.rst
 delete mode 100644 news/5204.bugfix.rst
 delete mode 100644 news/5206.bugfix.rst
 delete mode 100644 news/5210.bugfix.rst
 delete mode 100644 news/5215.vendor.rst

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 763432b529..294e5d9d88 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,3 +1,36 @@
+2022.8.5 (2022-08-05)
+=====================
+
+
+Features & Improvements
+-----------------------
+
+- support PIPENV_CUSTOM_VENV_NAME to be the venv name if specified, update relevant docs. `#4974 <https://github.com/pypa/pipenv/issues/4974>`_
+
+Bug Fixes
+---------
+
+- Remove usages of ``pip_shims`` from the non vendored ``pipenv`` code, but retain initialization for ``requirementslib`` still has usages. `#5204 <https://github.com/pypa/pipenv/issues/5204>`_
+- Fix case sensitivity of color name ``red`` in exception when getting hashes from pypi in ``_get_hashes_from_pypi``. `#5206 <https://github.com/pypa/pipenv/issues/5206>`_
+- Write output from ``subprocess_run`` directly to ``stdout`` instead of creating temporary file.
+  Remove deprecated ``distutils.sysconfig``, use ``sysconfig``. `#5210 <https://github.com/pypa/pipenv/issues/5210>`_
+
+Vendored Libraries
+------------------
+
+- * Rename patched ``notpip`` to ``pip`` in order to be clear that its a patched version of pip.
+  * Remove the part of _post_pip_import.patch that overrode the standalone pip to be the user installed pip,
+  now we fully rely on our vendored and patched ``pip``, even for all types of installs.
+  * Vendor in the next newest version of ``pip==22.2``
+  * Modify patch for ``pipdeptree`` to not use ``pip-shims`` `#5188 <https://github.com/pypa/pipenv/issues/5188>`_
+- * Remove vendored ``urllib3`` in favor of using it from vendored version in ``pip._vendor`` `#5215 <https://github.com/pypa/pipenv/issues/5215>`_
+
+Removals and Deprecations
+-------------------------
+
+- Remove tests that have been for a while been marked skipped and are no longer relevant. `#5165 <https://github.com/pypa/pipenv/issues/5165>`_
+
+
 2022.7.24 (2022-07-24)
 ======================
diff --git a/news/4974.feature.rst b/news/4974.feature.rst
deleted file mode 100644
index 3f88944983..0000000000
--- a/news/4974.feature.rst
+++ /dev/null
@@ -1 +0,0 @@
-support PIPENV_CUSTOM_VENV_NAME to be the venv name if specified, update relevant docs.
diff --git a/news/5165.removal.rst b/news/5165.removal.rst
deleted file mode 100644
index 08760a2dda..0000000000
--- a/news/5165.removal.rst
+++ /dev/null
@@ -1 +0,0 @@
-Remove tests that have been for a while been marked skipped and are no longer relevant.
diff --git a/news/5188.vendor.rst b/news/5188.vendor.rst
deleted file mode 100644
index 636116a0a5..0000000000
--- a/news/5188.vendor.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-* Rename patched ``notpip`` to ``pip`` in order to be clear that its a patched version of pip.
-* Remove the part of _post_pip_import.patch that overrode the standalone pip to be the user installed pip, -now we fully rely on our vendored and patched ``pip``, even for all types of installs. -* Vendor in the next newest version of ``pip==22.2`` -* Modify patch for ``pipdeptree`` to not use ``pip-shims`` diff --git a/news/5204.bugfix.rst b/news/5204.bugfix.rst deleted file mode 100644 index a2eda77d5f..0000000000 --- a/news/5204.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Remove usages of ``pip_shims`` from the non vendored ``pipenv`` code, but retain initialization for ``requirementslib`` still has usages. diff --git a/news/5206.bugfix.rst b/news/5206.bugfix.rst deleted file mode 100644 index ee4458eadb..0000000000 --- a/news/5206.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fix case sensitivity of color name ``red`` in exception when getting hashes from pypi in ``_get_hashes_from_pypi``. diff --git a/news/5210.bugfix.rst b/news/5210.bugfix.rst deleted file mode 100644 index e4f8caef4e..0000000000 --- a/news/5210.bugfix.rst +++ /dev/null @@ -1,2 +0,0 @@ -Write output from ``subprocess_run`` directly to ``stdout`` instead of creating temporary file. -Remove deprecated ``distutils.sysconfig``, use ``sysconfig``. diff --git a/news/5215.vendor.rst b/news/5215.vendor.rst deleted file mode 100644 index 01298cfb27..0000000000 --- a/news/5215.vendor.rst +++ /dev/null @@ -1 +0,0 @@ -* Remove vendored ``urllib3`` in favor of using it from vendored version in ``pip._vendor`` diff --git a/pipenv/__version__.py b/pipenv/__version__.py index 5a17582e36..c981b3a0d3 100644 --- a/pipenv/__version__.py +++ b/pipenv/__version__.py @@ -2,4 +2,4 @@ # // ) ) / / // ) ) //___) ) // ) ) || / / # //___/ / / / //___/ / // // / / || / / # // / / // ((____ // / / ||/ / -__version__ = "2022.7.25.dev0" +__version__ = "2022.8.5" diff --git a/pipenv/pipenv.1 b/pipenv/pipenv.1 index a9c305b329..436c3296dc 100644 --- a/pipenv/pipenv.1 +++ b/pipenv/pipenv.1 @@ -1,8 +1,5 @@ .\" Man page generated from reStructuredText. . -.TH "PIPENV" "1" "Jan 08, 2022" "2022.1.8" "pipenv" -.SH NAME -pipenv \- pipenv Documentation . .nr rst2man-indent-level 0 . @@ -30,6 +27,9 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. +.TH "PIPENV" "1" "Aug 05, 2022" "2022.8.5" "pipenv" +.SH NAME +pipenv \- pipenv Documentation \fI\%\fP\fI\%\fP\fI\%\fP .sp .ce @@ -42,7 +42,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .sp It automatically creates and manages a virtualenv for your projects, as well as adds/removes packages from your \fBPipfile\fP as you install/uninstall packages. It also generates the ever\-important \fBPipfile.lock\fP, which is used to produce deterministic builds. .sp -Pipenv is primarily meant to provide users and developers of applications with an easy method to setup a working environment. For the distinction between libraries and applications and the usage of \fBsetup.py\fP vs \fBPipfile\fP to define dependencies, see pipfile\-vs\-setuppy\&. +Pipenv is primarily meant to provide users and developers of applications with an easy method to setup a working environment. For the distinction between libraries and applications and the usage of \fBsetup.py\fP vs \fBPipfile\fP to define dependencies, see \fI\%☤ Pipfile vs setup.py\fP\&. 
[image: a short animation of pipenv at work]
[image]
 .sp
@@ -101,7 +101,7 @@ $ brew install pipenv
 .UNINDENT
 .UNINDENT
 .sp
-More detailed installation instructions can be found in the installing\-pipenv chapter.
+More detailed installation instructions can be found in the \fI\%☤ Installing Pipenv\fP chapter.
 .sp
 ✨🍰✨
 .SS Pipenv & Virtual Environments
@@ -343,6 +343,16 @@ $ pipenv install requests
 .UNINDENT
 .UNINDENT
 .sp
+\fBNOTE:\fP
+.INDENT 0.0
+.INDENT 3.5
+Pipenv is designed to be used by non\-privileged OS users. It is not meant
+to install or handle packages for the whole OS. Running Pipenv as \fBroot\fP
+or with \fBsudo\fP (or \fBAdmin\fP on Windows) is highly discouraged and might
+lead to unintended breakage of your OS.
+.UNINDENT
+.UNINDENT
+.sp
 Pipenv will install the excellent \fI\%Requests\fP library and create a
 \fBPipfile\fP for you in your project\(aqs directory. The \fBPipfile\fP is used to
 track which dependencies your project needs in case you need to re\-install them, such as
@@ -353,30 +363,31 @@ when you share your project with others. You should get output similar to this
 .sp
 .nf
 .ft C
-Creating a Pipfile for this project...
+pipenv install requests
 Creating a virtualenv for this project...
-Using base prefix \(aq/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6\(aq
-New python executable in ~/.local/share/virtualenvs/tmp\-agwWamBd/bin/python3.6
-Also creating executable in ~/.local/share/virtualenvs/tmp\-agwWamBd/bin/python
-Installing setuptools, pip, wheel...done.
+Pipfile: /home/user/myproject/Pipfile
+Using /home/user/.local/share/virtualenvs/pipenv\-Cv0J3wbi/bin/python3.9 (3.9.9) to create virtualenv...
+ Creating virtual environment...created virtual environment CPython3.9.9.final.0\-64 in 1142ms
+  creator CPython3Posix(dest=/home/user/.local/share/virtualenvs/myproject\-R3jRVewK, clear=False, no_vcs_ignore=False, global=False)
+  seeder FromAppData(download=False, pip=bundle, setuptools=bundle, wheel=bundle, via=copy, app_data_dir=/home/user/.local/share/virtualenv)
+    added seed packages: pip==21.3.1, setuptools==60.2.0, wheel==0.37.1
+  activators BashActivator,CShellActivator,FishActivator,NushellActivator,PowerShellActivator,PythonActivator

-Virtualenv location: ~/.local/share/virtualenvs/tmp\-agwWamBd
+✔ Successfully created virtual environment!
+Virtualenv location: /home/user/.local/share/virtualenvs/pms\-R3jRVewK
+Creating a Pipfile for this project...
 Installing requests...
-Collecting requests
-  Using cached requests\-2.18.4\-py2.py3\-none\-any.whl
-Collecting idna<2.7,>=2.5 (from requests)
-  Using cached idna\-2.6\-py2.py3\-none\-any.whl
-Collecting urllib3<1.23,>=1.21.1 (from requests)
-  Using cached urllib3\-1.22\-py2.py3\-none\-any.whl
-Collecting chardet<3.1.0,>=3.0.2 (from requests)
-  Using cached chardet\-3.0.4\-py2.py3\-none\-any.whl
-Collecting certifi>=2017.4.17 (from requests)
-  Using cached certifi\-2017.7.27.1\-py2.py3\-none\-any.whl
-Installing collected packages: idna, urllib3, chardet, certifi, requests
-Successfully installed certifi\-2017.7.27.1 chardet\-3.0.4 idna\-2.6 requests\-2.18.4 urllib3\-1.22
-
 Adding requests to Pipfile\(aqs [packages]...
-P.S. You have excellent taste! ✨ 🍰 ✨
+Installation Succeeded
+Pipfile.lock not found, creating...
+Locking [dev\-packages] dependencies...
+Locking [packages] dependencies...
+Building requirements...
+Resolving dependencies...
+✔ Success!
+Updated Pipfile.lock (fe5a22)!
+Installing dependencies from Pipfile.lock (fe5a22)...
+🐍   ▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉ 0/0 — 00:00:00
 .ft P
 .fi
 .UNINDENT
 .UNINDENT
 .sp
@@ -442,6 +453,281 @@ You might want to set \fBexport PIPENV_VENV_IN_PROJECT=1\fP in your .bashrc/.zsh
 .sp
 Congratulations, you now know how to install and use Python packages! ✨ 🍰 ✨
 .SS Release and Version History
+.SS 2022.8.5 (2022\-08\-05)
+.SS Features & Improvements
+.INDENT 0.0
+.IP \(bu 2
+support PIPENV_CUSTOM_VENV_NAME to be the venv name if specified, update relevant docs. \fI\%#4974\fP
+.UNINDENT
+.SS Bug Fixes
+.INDENT 0.0
+.IP \(bu 2
+Remove usages of \fBpip_shims\fP from the non vendored \fBpipenv\fP code, but retain initialization for \fBrequirementslib\fP still has usages. \fI\%#5204\fP
+.IP \(bu 2
+Fix case sensitivity of color name \fBred\fP in exception when getting hashes from pypi in \fB_get_hashes_from_pypi\fP\&. \fI\%#5206\fP
+.IP \(bu 2
+Write output from \fBsubprocess_run\fP directly to \fBstdout\fP instead of creating temporary file.
+Remove deprecated \fBdistutils.sysconfig\fP, use \fBsysconfig\fP\&. \fI\%#5210\fP
+.UNINDENT
+.SS Vendored Libraries
+.INDENT 0.0
+.IP \(bu 2
+.INDENT 2.0
+.IP \(bu 2
+Rename patched \fBnotpip\fP to \fBpip\fP in order to be clear that its a patched version of pip.
+.IP \(bu 2
+Remove the part of _post_pip_import.patch that overrode the standalone pip to be the user installed pip,
+.UNINDENT
+.sp
+now we fully rely on our vendored and patched \fBpip\fP, even for all types of installs.
+* Vendor in the next newest version of \fBpip==22.2\fP
+* Modify patch for \fBpipdeptree\fP to not use \fBpip\-shims\fP \fI\%#5188\fP
+.IP \(bu 2
+.INDENT 2.0
+.IP \(bu 2
+Remove vendored \fBurllib3\fP in favor of using it from vendored version in \fBpip._vendor\fP \fI\%#5215\fP
+.UNINDENT
+.UNINDENT
+.SS Removals and Deprecations
+.INDENT 0.0
+.IP \(bu 2
+Remove tests that have been for a while been marked skipped and are no longer relevant. \fI\%#5165\fP
+.UNINDENT
+.SS 2022.7.24 (2022\-07\-24)
+.SS Bug Fixes
+.INDENT 0.0
+.IP \(bu 2
+Re\-enabled three installs tests again on the Windows CI as recent refactor work has fixed them. \fI\%#5064\fP
+.IP \(bu 2
+Support ANSI \fBNO_COLOR\fP environment variable and deprecate \fBPIPENV_COLORBLIND\fP variable, which will be removed after this release. \fI\%#5158\fP
+.IP \(bu 2
+Fixed edge case where a non\-editable file, url or vcs would overwrite the value \fBno_deps\fP for all other requirements in the loop causing a retry condition. \fI\%#5164\fP
+.IP \(bu 2
+Vendor in latest \fBrequirementslib\fP for fix to lock when using editable VCS module with specific \fB@\fP git reference. \fI\%#5179\fP
+.UNINDENT
+.SS Vendored Libraries
+.INDENT 0.0
+.IP \(bu 2
+Remove crayons and replace with click.secho and click.styles per \fI\%https://github.com/pypa/pipenv/issues/3741\fP \fI\%#3741\fP
+.IP \(bu 2
+Vendor in latest version of \fBpip==22.1.2\fP which upgrades \fBpipenv\fP from \fBpip==22.0.4\fP\&.
+Vendor in latest version of \fBrequirementslib==1.6.7\fP which includes a fix for tracebacks on encountering Annotated variables.
+Vendor in latest version of \fBpip\-shims==0.7.3\fP such that imports could be rewritten to utilize \fBpackaging\fP from vendor\(aqd \fBpip\fP\&.
+Drop the \fBpackaging\fP requirement from the \fBvendor\fP directory in \fBpipenv\fP\&. \fI\%#5147\fP
+.IP \(bu 2
+Remove unused vendored dependency \fBnormailze\-charset\fP\&. \fI\%#5161\fP
+.IP \(bu 2
+Remove obsolete package \fBfuncsigs\fP\&. \fI\%#5168\fP
+.IP \(bu 2
+Bump vendored dependency \fBpyparsing==3.0.9\fP\&.
\fI\%#5170\fP
+.UNINDENT
+.SS 2022.7.4 (2022\-07\-04)
+.SS Behavior Changes
+.INDENT 0.0
+.IP \(bu 2
+Adjust \fBpipenv requirements\fP to add markers, and add an \fB\-\-exclude\-markers\fP option to allow the exclusion of markers. \fI\%#5092\fP
+.UNINDENT
+.SS Bug Fixes
+.INDENT 0.0
+.IP \(bu 2
+Stopped expanding environment variables when using \fBpipenv requirements\fP\&. \fI\%#5134\fP
+.UNINDENT
+.SS Vendored Libraries
+.INDENT 0.0
+.IP \(bu 2
+Depend on \fBrequests\fP and \fBcertifi\fP from vendored \fBpip\fP and remove them as explicit vendor dependencies. \fI\%#5000\fP
+.IP \(bu 2
+Vendor in the latest version of \fBrequirementslib==1.6.5\fP, which includes bug fixes for beta python versions, projects with an at sign (@) in the path, and a \fBsetuptools\fP deprecation warning. \fI\%#5132\fP
+.UNINDENT
+.SS Relates to dev process changes
+.INDENT 0.0
+.IP \(bu 2
+Switch from using type comments to type annotations.
+.UNINDENT
+.SS 2022.5.3.dev0 (2022\-06\-07)
+.SS Bug Fixes
+.INDENT 0.0
+.IP \(bu 2
+Adjust pipenv to work with the newly added \fBvenv\fP install scheme in Python.
+First check if \fBvenv\fP is among the available install schemes, and use it if it is. Otherwise fall back to the \fBnt\fP or \fBposix_prefix\fP install schemes as before. This should produce no change for environments where the install schemes were not redefined. \fI\%#5096\fP
+.UNINDENT
+.SS 2022.5.2 (2022\-05\-02)
+.SS Bug Fixes
+.INDENT 0.0
+.IP \(bu 2
+Fixes an issue with the \fBpipenv lock \-r\fP command printing to stdout instead of stderr. \fI\%#5091\fP
+.UNINDENT
+.SS 2022.4.30 (2022\-04\-30)
+.SS Bug Fixes
+.INDENT 0.0
+.IP \(bu 2
+Fixes a problem with the \fBrequirements\fP command by modifying it to print \fB\-e\fP and the path of the editable package. \fI\%#5070\fP
+.IP \(bu 2
+Revert the specifier of the \fBsetuptools\fP requirement in \fBsetup.py\fP back to what it was, in order to fix a reported \fBFileNotFoundError: [Errno 2]\fP issue. \fI\%#5075\fP
+.IP \(bu 2
+Fixes an issue where git requirements caused the \fBrequirements\fP command to fail; solved by using the existing convert_deps_to_pip function. \fI\%#5076\fP
+.UNINDENT
+.SS Vendored Libraries
+.INDENT 0.0
+.IP \(bu 2
+Vendor in \fBrequirementslib==1.6.4\fP to fix \fBSetuptoolsDeprecationWarning\fP: \fBsetuptools.config.read_configuration\fP became deprecated. \fI\%#5081\fP
+.UNINDENT
+.SS Removals and Deprecations
+.INDENT 0.0
+.IP \(bu 2
+Remove more usages of miscellaneous functions from vistir. Many of these functions are available in the standard library or in another dependency of pipenv. \fI\%#5078\fP
+.UNINDENT
+.SS 2022.4.21 (2022\-04\-21)
+.SS Removals and Deprecations
+.INDENT 0.0
+.IP \(bu 2
+Updated setup.py to remove support for python 3.6 from the built \fBpipenv\fP packages\(aq metadata. \fI\%#5065\fP
+.UNINDENT
+.SS 2022.4.20 (2022\-04\-20)
+.SS Features & Improvements
+.INDENT 0.0
+.IP \(bu 2
+Added new Pipenv option \fBinstall_search_all_sources\fP that allows installation of packages from an
+existing \fBPipfile.lock\fP to search all defined indexes for the constrained package version and hash signatures. \fI\%#5041\fP
+.UNINDENT
+.SS Bug Fixes
+.INDENT 0.0
+.IP \(bu 2
+Allow the user to disable the \fBno_input\fP flag, so that the use of e.g. Google Artifact Registry is possible. \fI\%#4706\fP
+.IP \(bu 2
+Fixes a case where packages could fail to install although the exit code was successful.
\fI\%#5031\fP
+.UNINDENT
+.SS Vendored Libraries
+.INDENT 0.0
+.IP \(bu 2
+Updated the vendored version of \fBpip\fP from \fB21.2.2\fP to \fB22.0.4\fP, which fixes a number of bugs, including
+several reports of pipenv locking for an infinite amount of time when using certain package constraints.
+This also drops support for python 3.6, as it is EOL and support was removed in pip 22.x\&. \fI\%#4995\fP
+.UNINDENT
+.SS Removals and Deprecations
+.INDENT 0.0
+.IP \(bu 2
+Removed the vendor dependency \fBmore\-itertools\fP, as it was originally added for \fBzipp\fP, which has since stopped using it. \fI\%#5044\fP
+.IP \(bu 2
+Removed all usages of \fBpipenv.vendor.vistir.compat.fs_str\fP, since this function was used for PY2\-PY3 compatibility and is no longer needed. \fI\%#5062\fP
+.UNINDENT
+.SS Relates to dev process changes
+.INDENT 0.0
+.IP \(bu 2
+Added pytest\-cov and basic configuration to the project for generating html testing coverage reports.
+.IP \(bu 2
+Make all CI jobs run only after the lint stage. Also added a makefile target for vendoring the packages.
+.UNINDENT
+.SS 2022.4.8 (2022\-04\-08)
+.SS Features & Improvements
+.INDENT 0.0
+.IP \(bu 2
+Implements a \fBpipenv requirements\fP command which generates requirements.txt\-compatible output without locking. \fI\%#4959\fP
+.IP \(bu 2
+Internal to pipenv, utils.py was split into a utils module, with unused code removed. \fI\%#4992\fP
+.UNINDENT
+.SS Bug Fixes
+.INDENT 0.0
+.IP \(bu 2
+Pipenv will now ignore \fB\&.venv\fP in the project when the \fBPIPENV_VENV_IN_PROJECT\fP variable is False.
+Leaving the variable unset maintains the existing behavior of preferring to use the project\(aqs \fB\&.venv\fP should it exist. \fI\%#2763\fP
+.IP \(bu 2
+Fix an edge case of hash collection in index restricted packages whereby the hashes for some packages would
+be missing from the \fBPipfile.lock\fP following the package index restrictions added in \fBpipenv==2022.3.23\fP\&. \fI\%#5023\fP
+.UNINDENT
+.SS Improved Documentation
+.INDENT 0.0
+.IP \(bu 2
+Pipenv CLI documentation generation has been fixed. It had broken when \fBclick\fP was vendored into the project in
+\fB2021.11.9\fP, because by default \fBsphinx\-click\fP could no longer determine the CLI inherited from click. \fI\%#4778\fP
+.IP \(bu 2
+Improve documentation around extra indexes and index restricted packages. \fI\%#5022\fP
+.UNINDENT
+.SS Removals and Deprecations
+.INDENT 0.0
+.IP \(bu 2
+Removes the optional \fBinstall\fP argument \fB\-\-extra\-index\-url\fP, as it was not compatible with index restricted packages.
+Using the \fB\-\-index\fP argument is the correct way to specify that a package should be pulled from a non\-default index. \fI\%#5022\fP
+.UNINDENT
+.SS Relates to dev process changes
+.INDENT 0.0
+.IP \(bu 2
+Added code linting using pre\-commit\-hooks, black, flake8, isort, pygrep\-hooks, news\-fragments and check\-manifest.
+Very similar to pip\(aqs configuration; adds a towncrier news type \fBprocess\fP for changes to development processes.
+.UNINDENT
+.SS 2022.3.28 (2022\-03\-27)
+.SS Bug Fixes
+.INDENT 0.0
+.IP \(bu 2
+Environment variables were not being loaded when the \fB\-\-quiet\fP flag was set. \fI\%#5010\fP
+.IP \(bu 2
+It would appear that \fBrequirementslib\fP was not fully specifying the subdirectory to \fBbuild_pep517\fP,
+and when a new version of \fBsetuptools\fP was released, the test \fBtest_lock_nested_vcs_direct_url\fP
+broke, indicating that the Pipfile.lock no longer contained the extra dependencies that should have been resolved.
+This regression affected \fBpipenv>=2021.11.9\fP but has been fixed by a patch to \fBrequirementslib\fP\&. \fI\%#5019\fP
+.UNINDENT
+.SS Vendored Libraries
+.INDENT 0.0
+.IP \(bu 2
+Vendor in pip==21.2.4 (from 21.2.2) in order to bring in a requested bug fix for python3.6. Note: support for 3.6 will be dropped in a subsequent release. \fI\%#5008\fP
+.UNINDENT
+.SS 2022.3.24 (2022\-03\-23)
+.SS Features & Improvements
+.INDENT 0.0
+.IP \(bu 2
+It is now possible to silence the \fBLoading .env environment variables\fP message on \fBpipenv run\fP
+with the \fB\-\-quiet\fP flag or the \fBPIPENV_QUIET\fP environment variable. \fI\%#4027\fP
+.UNINDENT
+.SS Bug Fixes
+.INDENT 0.0
+.IP \(bu 2
+Fixes an issue with the new index safety restriction, whereby an unnamed extra sources index
+caused an error to be thrown during install. \fI\%#5002\fP
+.IP \(bu 2
+The text \fBLoading .env environment variables...\fP has been switched back to stderr so as not to
+break requirements.txt generation. Also, it now prints only when a \fB\&.env\fP file is actually present. \fI\%#5003\fP
+.UNINDENT
+.SS 2022.3.23 (2022\-03\-22)
+.SS Features & Improvements
+.INDENT 0.0
+.IP \(bu 2
+Use the environment variable \fBPIPENV_SKIP_LOCK\fP to control the behaviour of lock skipping. \fI\%#4797\fP
+.IP \(bu 2
+New CLI command \fBverify\fP, which checks that the Pipfile.lock is up\-to\-date. \fI\%#4893\fP
+.UNINDENT
+.SS Behavior Changes
+.INDENT 0.0
+.IP \(bu 2
+Pattern expansion for arguments was disabled on Windows. \fI\%#4935\fP
+.UNINDENT
+.SS Bug Fixes
+.INDENT 0.0
+.IP \(bu 2
+Python versions on Windows can now be installed automatically through pyenv\-win. \fI\%#4525\fP
+.IP \(bu 2
+Patched our vendored Pip to fix: Pipenv Lock (Or Install) Does Not Respect Index Specified For A Package. \fI\%#4637\fP
+.IP \(bu 2
+If \fBPIP_TARGET\fP is set as an environment variable, refer to the specified directory to calculate the delta, instead of the default directory. \fI\%#4775\fP
+.IP \(bu 2
+Remove remaining mentions of python2 and the \-\-two flag from the codebase. \fI\%#4938\fP
+.IP \(bu 2
+Use the \fBCI\fP environment variable\(aqs value, rather than the mere existence of the name. \fI\%#4944\fP
+.IP \(bu 2
+Environment variables from dot env files are now properly expanded when included in scripts. \fI\%#4975\fP
+.UNINDENT
+.SS Vendored Libraries
+.INDENT 0.0
+.IP \(bu 2
+Updated the vendored version of \fBpythonfinder\fP from \fB1.2.9\fP to \fB1.2.10\fP, which fixes a bug with WSL
+(Windows Subsystem for Linux) when a path cannot be read and a Permission Denied error is encountered. \fI\%#4976\fP
+.UNINDENT
+.SS Removals and Deprecations
+.INDENT 0.0
+.IP \(bu 2
+Removes the long\-broken argument \fB\-\-code\fP from \fBinstall\fP and \fB\-\-unused\fP from \fBcheck\fP\&.
+The check command no longer takes in arguments to ignore.
+Removed the vendored dependencies: \fBpipreqs\fP and \fByarg\fP\&. \fI\%#4998\fP
+.UNINDENT
.SS 2022.1.8 (2022\-01\-08)
.SS Bug Fixes
.INDENT 0.0
@@ -497,7 +783,7 @@ Replace \fBclick\-completion\fP with \fBclick\fP\(aqs own completion implementat
.IP \(bu 2
Fix a bug that \fBpipenv run\fP doesn\(aqt set environment variables correctly. \fI\%#4831\fP
.IP \(bu 2
-Fix a bug that certifi can\(aqt be loaded within \fBpip\fP\(aqs vendor library. This makes several objects of \fBpip\fP fail to be imported. \fI\%#4833\fP
+Fix a bug that certifi can\(aqt be loaded within \fBnotpip\fP\(aqs vendor library. This makes several objects of \fBpip\fP fail to be imported. \fI\%#4833\fP
.IP \(bu 2
Fix a bug that \fB3.10.0\fP can be found by python finder.
\fI\%#4837\fP .UNINDENT @@ -565,7 +851,7 @@ Add new vendored dependencies .IP \(bu 2 Drop the dependencies for Python 2.7 compatibility purpose. \fI\%#4751\fP .IP \(bu 2 -Switch the dependency resolver from \fBpip\-tools\fP to \fIpip\fP\&. +Switch the dependency resolver from \fBpip\-tools\fP to \fBpip\fP\&. .sp Update vendor libraries: \- Update \fBrequirementslib\fP from \fB1.5.16\fP to \fB1.6.1\fP @@ -853,7 +1139,7 @@ Allow overriding PIPENV_INSTALL_TIMEOUT environment variable (in seconds). \fI\ .IP \(bu 2 Allow overriding PIP_EXISTS_ACTION evironment variable (value is passed to pip install). Possible values here: \fI\%https://pip.pypa.io/en/stable/reference/pip/#exists\-action\-option\fP -Useful when you need to \fIPIP_EXISTS_ACTION=i\fP (ignore existing packages) \- great for CI environments, where you need really fast setup. \fI\%#3738\fP +Useful when you need to \fBPIP_EXISTS_ACTION=i\fP (ignore existing packages) \- great for CI environments, where you need really fast setup. \fI\%#3738\fP .IP \(bu 2 Pipenv will no longer forcibly override \fBPIP_NO_DEPS\fP on all vcs and file dependencies as resolution happens on these in a pre\-lock step. \fI\%#3763\fP .IP \(bu 2 @@ -879,7 +1165,7 @@ Make sure \fBpipenv lock \-r \-\-pypi\-mirror {MIRROR_URL}\fP will respect the p .SS Bug Fixes .INDENT 0.0 .IP \(bu 2 -Raise \fIPipenvUsageError\fP when [[source]] does not contain url field. \fI\%#2373\fP +Raise \fBPipenvUsageError\fP when [[source]] does not contain url field. \fI\%#2373\fP .IP \(bu 2 Fixed a bug which caused editable package resolution to sometimes fail with an unhelpful setuptools\-related error message. \fI\%#2722\fP .IP \(bu 2 @@ -1058,28 +1344,28 @@ Update vendored dependencies and invocations .INDENT 2.0 .IP \(bu 2 Update vendored and patched dependencies -\- Update patches on \fBpiptools\fP, \fBpip\fP, \fBpip\-shims\fP, +\- Update patches on \fBpiptools\fP, \fBpip\fP, \fBpip\-shims\fP, .nf \(ga\(ga .fi tomlkit\(ga .IP \(bu 2 Fix invocations of dependencies -\- Fix custom +\- Fix custom .nf \(ga\(ga .fi InstallCommand\(ga instantiation -\- Update +\- Update .nf \(ga\(ga .fi PackageFinder\(ga usage -\- Fix +\- Fix .nf \(ga\(ga .fi -Bool\(ga stringify attempts from +Bool\(ga stringify attempts from .nf \(ga\(ga .fi @@ -1376,7 +1662,7 @@ Fixed an issue in \fBdelegator.py\fP related to subprocess calls when using \fBP \fI\%#3114\fP, \fI\%#3117\fP .IP \(bu 2 -Fix the path casing issue that makes \fIpipenv clean\fP fail on Windows \fI\%#3104\fP +Fix the path casing issue that makes \fBpipenv clean\fP fail on Windows \fI\%#3104\fP .IP \(bu 2 Pipenv will avoid leaving build artifacts in the current working directory. \fI\%#3106\fP .IP \(bu 2 @@ -1388,7 +1674,7 @@ Updated \fBpythonfinder\fP to correct an issue with unnesting of nested paths wh .IP \(bu 2 Added additional logic for ignoring and replacing non\-ascii characters when formatting console output on non\-UTF\-8 systems. \fI\%#3131\fP .IP \(bu 2 -Fix virtual environment discovery when \fBPIPENV_VENV_IN_PROJECT\fP is set, but the in\-project \fI\&.venv\fP is a file. \fI\%#3134\fP +Fix virtual environment discovery when \fBPIPENV_VENV_IN_PROJECT\fP is set, but the in\-project \fB\&.venv\fP is a file. \fI\%#3134\fP .IP \(bu 2 Hashes for remote and local non\-PyPI artifacts will now be included in \fBPipfile.lock\fP during resolution. \fI\%#3145\fP .IP \(bu 2 @@ -1502,7 +1788,7 @@ Upgraded \fBpythonfinder => 1.1.1\fP and \fBvistir => 0.1.7\fP\&. 
\fI\%#3007\fP .SS Features & Improvements .INDENT 0.0 .IP \(bu 2 -Added environment variables \fIPIPENV_VERBOSE\fP and \fIPIPENV_QUIET\fP to control +Added environment variables \fBPIPENV_VERBOSE\fP and \fBPIPENV_QUIET\fP to control output verbosity without needing to pass options. \fI\%#2527\fP .IP \(bu 2 Updated test\-PyPI add\-on to better support json\-API access (forward compatibility). @@ -1537,7 +1823,7 @@ Invoke \fBvirtualenv\fP directly for virtual environment creation, instead of de Add \fBCOMSPEC\fP to fallback option (along with \fBSHELL\fP and \fBPYENV_SHELL\fP) if shell detection fails, improving robustness on Windows. \fI\%#2651\fP .IP \(bu 2 -Fallback to shell mode if \fIrun\fP fails with Windows error 193 to handle non\-executable commands. This should improve usability on Windows, where some users run non\-executable files without specifying a command, relying on Windows file association to choose the current command. \fI\%#2718\fP +Fallback to shell mode if \fBrun\fP fails with Windows error 193 to handle non\-executable commands. This should improve usability on Windows, where some users run non\-executable files without specifying a command, relying on Windows file association to choose the current command. \fI\%#2718\fP .UNINDENT .SS Bug Fixes .INDENT 0.0 @@ -1658,7 +1944,7 @@ Fixed a bug in the dependency resolver which caused regular issues when handling \fI\%#2867\fP, \fI\%#2880\fP .IP \(bu 2 -Fixed a bug where \fIpipenv\fP crashes when the \fIWORKON_HOME\fP directory does not exist. \fI\%#2877\fP +Fixed a bug where \fBpipenv\fP crashes when the \fBWORKON_HOME\fP directory does not exist. \fI\%#2877\fP .IP \(bu 2 Fixed pip is not loaded from pipenv\(aqs patched one but the system one \fI\%#2912\fP .IP \(bu 2 @@ -1750,7 +2036,7 @@ Added a link to \fBPEP\-440\fP version specifiers in the documentation for addit .IP \(bu 2 Added simple example to README.md for installing from git. \fI\%#2685\fP .IP \(bu 2 -Stopped recommending \fI\-\-system\fP for Docker contexts. \fI\%#2762\fP +Stopped recommending \fB\-\-system\fP for Docker contexts. \fI\%#2762\fP .IP \(bu 2 Fixed the example url for doing "pipenv install \-e some\-repository\-url#egg=something", it was missing the "egg=" in the fragment @@ -1758,7 +2044,7 @@ identifier. \fI\%#2792\fP .IP \(bu 2 Fixed link to the "be cordial" essay in the contribution documentation. \fI\%#2793\fP .IP \(bu 2 -Clarify \fIpipenv install\fP documentation \fI\%#2844\fP +Clarify \fBpipenv install\fP documentation \fI\%#2844\fP .IP \(bu 2 Replace reference to uservoice with PEEP\-000 \fI\%#2909\fP .UNINDENT @@ -2058,8 +2344,6 @@ When no parameters are passed to \fBinstall\fP, all packages \fB[packages]\fP sp .IP \(bu 2 To initialize a Python 3 virtual environment, run \fB$ pipenv \-\-three\fP\&. .IP \(bu 2 -To initialize a Python 2 virtual environment, run \fB$ pipenv \-\-two\fP\&. -.IP \(bu 2 Otherwise, whatever virtualenv defaults to will be the default. .UNINDENT .SS Other Commands @@ -2084,6 +2368,7 @@ Pipfiles contain information for the dependencies of the project, and supersedes the requirements.txt file used in most Python projects. You should add a Pipfile in the Git repository letting users who clone the repository know the only thing required would be installing Pipenv in the machine and typing \fBpipenv install\fP\&. Pipenv is a reference +.sp implementation for using Pipfile. .sp Here is a simple example of a \fBPipfile\fP and the resulting \fBPipfile.lock\fP\&. 
@@ -2207,7 +2492,7 @@ Generally, keep both \fBPipfile\fP and \fBPipfile.lock\fP in version control.
.IP \(bu 2
Do not keep \fBPipfile.lock\fP in version control if multiple versions of Python are being targeted.
.IP \(bu 2
-Specify your target Python version in your \fIPipfile\fP\(aqs \fB[requires]\fP section. Ideally, you should only have one target Python version, as this is a deployment tool. \fBpython_version\fP should be in the format \fBX.Y\fP (or \fBX\fP) and \fBpython_full_version\fP should be in \fBX.Y.Z\fP format.
+Specify your target Python version in your \fBPipfile\fP\(aqs \fB[requires]\fP section. Ideally, you should only have one target Python version, as this is a deployment tool. \fBpython_version\fP should be in the format \fBX.Y\fP (or \fBX\fP) and \fBpython_full_version\fP should be in \fBX.Y.Z\fP format.
.IP \(bu 2
\fBpipenv install\fP is fully compatible with \fBpip install\fP syntax, for which the full documentation can be found \fI\%here\fP\&.
.IP \(bu 2
@@ -2484,8 +2769,6 @@ The user can provide these additional parameters:
.INDENT 3.5
.INDENT 0.0
.IP \(bu 2
-\fB\-\-two\fP — Performs the installation in a virtualenv using the system \fBpython2\fP link.
-.IP \(bu 2
\fB\-\-three\fP — Performs the installation in a virtualenv using the system \fBpython3\fP link.
.IP \(bu 2
\fB\-\-python\fP — Performs the installation in a virtualenv using the provided Python interpreter.
@@ -2499,17 +2782,6 @@ None of the above commands should be used together. They are also
it with an appropriately versioned one.
.UNINDENT
.UNINDENT
-.sp
-\fBNOTE:\fP
-.INDENT 0.0
-.INDENT 3.5
-The virtualenv created by Pipenv may be different from what you were expecting.
-Dangerous characters (i.e. \fB$\(ga!*@"\fP as well as space, line feed, carriage return,
-and tab) are converted to underscores. Additionally, the full path to the current
-folder is encoded into a "slug value" and appended to ensure the virtualenv name
-is unique.
-.UNINDENT
-.UNINDENT
.INDENT 0.0
.IP \(bu 2
\fB\-\-dev\fP — Install both \fBdevelop\fP and \fBdefault\fP packages from \fBPipfile\fP\&.
@@ -2570,7 +2842,7 @@ The shell launched in interactive mode. This means that if your shell reads its
.UNINDENT
.UNINDENT
.sp
-If you experience issues with \fB$ pipenv shell\fP, just check the \fBPIPENV_SHELL\fP environment variable, which \fB$ pipenv shell\fP will use if available. For detail, see configuration\-with\-environment\-variables\&.
+If you experience issues with \fB$ pipenv shell\fP, just check the \fBPIPENV_SHELL\fP environment variable, which \fB$ pipenv shell\fP will use if available. For detail, see \fI\%☤ Configuration With Environment Variables\fP\&.
.SS ☤ A Note about VCS Dependencies
.sp
You can install packages with pipenv from git and other version control systems using URLs formatted according to the following rule:
@@ -2633,6 +2905,135 @@ This will not include hashes, however. To get a \fBrequirements.txt\fP
you can also use \fB$ pipenv run pip freeze\fP\&.
.UNINDENT
.UNINDENT
+.SS ☤ Pipenv and Docker Containers
+.sp
+In general, you should not have Pipenv inside a Linux container image, since
+it is a build tool. If you want to use it to build and install the runtime
+dependencies for your application, you can use a multi\-stage build to create
+a virtual environment with your dependencies. In this approach,
+Pipenv is installed in the base layer, and it is then used to create the virtual
+environment.
In a later stage, in a \fBruntime\fP layer, the virtual environment
+is copied from the base layer; the layer containing pipenv and other build
+dependencies is discarded.
+This results in a smaller image, which can still run your application.
+Here is an example \fBDockerfile\fP, which you can use as a starting point for
+doing a multi\-stage build for your application:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+FROM docker.io/python:3.9 AS builder
+
+RUN pip install \-\-user pipenv
+
+# Tell pipenv to create venv in the current directory
+ENV PIPENV_VENV_IN_PROJECT=1
+
+# Pipfile contains requests
+ADD Pipfile.lock Pipfile /usr/src/
+
+WORKDIR /usr/src
+
+# NOTE: If you install binary packages required for a python module, you need
+# to install them again in the runtime image. For example, if you need to install pycurl
+# you need to have the pycurl build dependencies libcurl4\-gnutls\-dev and libcurl3\-gnutls
+# In the runtime container you need only libcurl3\-gnutls
+
+# RUN apt install \-y libcurl3\-gnutls libcurl4\-gnutls\-dev
+
+RUN /root/.local/bin/pipenv sync
+
+RUN /usr/src/.venv/bin/python \-c "import requests; print(requests.__version__)"
+
+FROM docker.io/python:3.9 AS runtime
+
+RUN mkdir \-v /usr/src/venv
+
+COPY \-\-from=builder /usr/src/.venv/ /usr/src/venv/
+
+RUN /usr/src/venv/bin/python \-c "import requests; print(requests.__version__)"
+
+# HERE GOES ANY CODE YOU NEED TO ADD TO CREATE YOUR APPLICATION\(aqS IMAGE
+# For example
+# RUN apt install \-y libcurl3\-gnutls
+# RUN adduser \-\-uid 123123 coolio
+
+WORKDIR /usr/src/
+
+USER coolio
+
+CMD ["./venv/bin/python", "run.py"]
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+\fBNOTE:\fP
+.INDENT 0.0
+.INDENT 3.5
+Pipenv is not meant to run as root. However, in the multi\-stage build above
+it is done nevertheless; a calculated risk, since the intermediary image
+is discarded.
+The runtime stage later shows that you should create a user and use it to
+run your application.
+\fBOnce again, you should not run pipenv as root (or Admin on Windows) normally.
+This could lead to breakage of your Python installation, or even your complete
+OS.\fP
+.UNINDENT
+.UNINDENT
+.sp
+When you build an image with this example (assuming requests is found in the Pipfile), you
+will see that \fBrequests\fP is installed in the \fBruntime\fP image:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+$ sudo docker build \-\-no\-cache \-t oz/123:0.1 .
+Sending build context to Docker daemon 1.122MB
+Step 1/12 : FROM docker.io/python:3.9 AS builder
+ \-\-\-> 81f391f1a7d7
+Step 2/12 : RUN pip install \-\-user pipenv
+ \-\-\-> Running in b83ed3c28448
+ ... trimmed ...
+ \-\-\-> 848743eb8c65
+Step 4/12 : ENV PIPENV_VENV_IN_PROJECT=1
+ \-\-\-> Running in 814e6f5fec5b
+Removing intermediate container 814e6f5fec5b
+ \-\-\-> 20167b4a13e1
+Step 5/12 : ADD Pipfile.lock Pipfile /usr/src/
+ \-\-\-> c7632cb3d5bd
+Step 6/12 : WORKDIR /usr/src
+ \-\-\-> Running in 1d75c6cfce10
+Removing intermediate container 1d75c6cfce10
+ \-\-\-> 2dcae54cc2e5
+Step 7/12 : RUN /root/.local/bin/pipenv sync
+ \-\-\-> Running in 1a00b326b1ee
+Creating a virtualenv for this project...
+\&... trimmed ...
+✔ Successfully created virtual environment!
+Virtualenv location: /usr/src/.venv
+Installing dependencies from Pipfile.lock (fe5a22)...
+\&... trimmed ...
+Step 8/12 : RUN /usr/src/.venv/bin/python \-c "import requests; print(requests.__version__)"
+ \-\-\-> Running in 3a66e3ce4a11
+2.27.1
+Removing intermediate container 3a66e3ce4a11
+ \-\-\-> 1db657d0ac17
+Step 9/12 : FROM docker.io/python:3.9 AS runtime
+\&... trimmed ...
+Step 12/12 : RUN /usr/src/venv/bin/python \-c "import requests; print(requests.__version__)"
+ \-\-\-> Running in fa39ba4080c5
+2.27.1
+Removing intermediate container fa39ba4080c5
+ \-\-\-> 2b1c90fd414e
+Successfully built 2b1c90fd414e
+Successfully tagged oz/123:0.1
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
.SS Advanced Usage of Pipenv
[image]
.sp
@@ -2648,7 +3049,10 @@ Installation is intended to be as deterministic as possible —\ use the \fB\-\-
.UNINDENT
.SS ☤ Specifying Package Indexes
.sp
-If you\(aqd like a specific package to be installed with a specific package index, you can do the following:
+Starting in release \fB2022.3.23\fP, all packages are mapped only to a single package index for security reasons.
+All unspecified packages are resolved using the default index source; the default package index is PyPI.
+.sp
+For a specific package to be installed from an alternate package index, you must match the name of the index, as in the following example:
.INDENT 0.0
.INDENT 3.5
.sp
@@ -2660,55 +3064,101 @@ verify_ssl = true
name = "pypi"

[[source]]
-url = "http://pypi.home.kennethreitz.org/simple"
+url = "https://download.pytorch.org/whl/cu113/"
verify_ssl = false
-name = "home"
+name = "pytorch"

[dev\-packages]

[packages]
-requests = {version="*", index="home"}
-maya = {version="*", index="pypi"}
-records = "*"
+torch = {version="*", index="pytorch"}
+numpy = {version="*"}
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
-Very fancy.
-.SS ☤ Using a PyPI Mirror
+You may install a package such as the example \fBtorch\fP from the named index \fBpytorch\fP using the CLI by running
+the following command:
+.sp
+\fBpipenv install \-\-index=pytorch torch\fP
.sp
-If you would like to override the default PyPI index URLs with the URL for a PyPI mirror, you can use the following:
+Alternatively, the index may be specified by its full URL, in which case it will be added to the \fBPipfile\fP with a generated name,
+unless it already exists, in which case the existing name will be reused when pinning the package index.
+.sp
+\fBNOTE:\fP
.INDENT 0.0
.INDENT 3.5
+In prior versions of \fBpipenv\fP you could specify \fB\-\-extra\-index\-urls\fP to the \fBpip\fP resolver and avoid
+specifically matching the expected index by name. That functionality was deprecated in favor of index restricted
+packages, which is a simplifying assumption that is more security minded. The pip documentation has the following
+warning around the \fB\-\-extra\-index\-urls\fP option:
.sp
-.nf
-.ft C
-$ pipenv install \-\-pypi\-mirror
-
-$ pipenv update \-\-pypi\-mirror
-
-$ pipenv sync \-\-pypi\-mirror
-
-$ pipenv lock \-\-pypi\-mirror
-
-$ pipenv uninstall \-\-pypi\-mirror
-.ft P
-.fi
+\fIUsing this option to search for packages which are not in the main repository (such as private packages) is unsafe,
+per a security vulnerability called dependency confusion: an attacker can claim the package on the public repository
+in a way that will ensure it gets chosen over the private package.\fP
.UNINDENT
.UNINDENT
.sp
-Alternatively, you can set the \fBPIPENV_PYPI_MIRROR\fP environment variable.
-.SS ☤ Injecting credentials into Pipfiles via environment variables
+Should you wish to use an alternative default index other than PyPI, simply do not specify PyPI as one of the
+sources in your \fBPipfile\fP\&. When PyPI is omitted, any public packages required either directly or
+as sub\-dependencies must be mirrored onto your private index or they will not resolve properly. This matches the
+standard recommendation of \fBpip\fP maintainers: "To correctly make a private project installable is to point
+\-\-index\-url to an index that contains both PyPI and their private projects—which is our recommended best practice."
.sp
+The above documentation holds true for both \fBlock\fP resolution and \fBsync\fP of packages. It was suggested that,
+once the resolution and the lock file are updated, it is theoretically possible to safely scan multiple indexes
+for these packages when running \fBpipenv sync\fP or \fBpipenv install \-\-deploy\fP, since it will verify that the package
+hashes match the allowed hashes that were already captured from a safe locking cycle.
+To enable this non\-default behavior, add the \fBinstall_search_all_sources = true\fP option
+to your \fBPipfile\fP in the \fBpipenv\fP section:
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
-[[source]]
+[pipenv]
+install_search_all_sources = true
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+\fBNote:\fP The locking cycle will still require that each package be resolved from a single index. This feature was
+requested as a workaround in order to support organizations where not everyone has access to the package sources.
+.SS ☤ Using a PyPI Mirror
+.sp
+Should you wish to override the default PyPI index URLs with the URL for a PyPI mirror, you can do the following:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+$ pipenv install \-\-pypi\-mirror <mirror_url>
+
+$ pipenv update \-\-pypi\-mirror <mirror_url>
+
+$ pipenv sync \-\-pypi\-mirror <mirror_url>
+
+$ pipenv lock \-\-pypi\-mirror <mirror_url>
+
+$ pipenv uninstall \-\-pypi\-mirror <mirror_url>
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+Alternatively, setting the \fBPIPENV_PYPI_MIRROR\fP environment variable is equivalent to passing \fB\-\-pypi\-mirror <mirror_url>\fP\&.
+.SS ☤ Injecting credentials into Pipfile via environment variables
+.sp
+Pipenv will expand environment variables (if defined) in your Pipfile. Quite
+useful if you need to authenticate to a private PyPI:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+[[source]]
url = "https://$USERNAME:${PASSWORD}@mypypi.example.com/simple"
verify_ssl = true
name = "pypi"
@@ -2741,6 +3191,45 @@ requests = {git = "git://${USERNAME}:${PASSWORD}@private.git.com/psf/requests.gi
.UNINDENT
.sp
Keep in mind that environment variables are expanded at runtime, leaving the entries in \fBPipfile\fP or \fBPipfile.lock\fP untouched. This is to avoid the accidental leakage of credentials in the source code.
+.SS ☤ Injecting credentials through keychain support
+.sp
+Private registries on Google Cloud, Azure, and AWS support dynamic credentials using
+the keychain implementation. Due to the way the keychain is structured, it might ask
+the user for input. By default, asking the user for input is disabled, which
+unfortunately disables keychain support completely.
+.sp
+If you want to work with private registries that use the keychain for authentication, you
+can disable the "enforcement of no input".
+.sp
+\fBNote:\fP Please be sure that the keychain will really not ask for
+input.
Otherwise the process will hang forever!:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+[[source]]
+url = "https://pypi.org/simple"
+verify_ssl = true
+name = "pypi"
+
+[[source]]
+url = "https://europe\-python.pkg.dev/my\-project/python/simple"
+verify_ssl = true
+name = "private\-gcp"
+
+[packages]
+flask = "*"
+private\-test\-package = {version = "*", index = "private\-gcp"}
+
+[pipenv]
+disable_pip_input = false
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+The above example will install \fBflask\fP and a private package \fBprivate\-test\-package\fP from GCP.
.SS ☤ Specifying Basically Anything
.sp
If you\(aqd like to specify that a specific package only be installed on certain systems,
@@ -2821,6 +3310,20 @@ $ pipenv sync
\fBpipenv install \-\-ignore\-pipfile\fP is nearly equivalent to \fBpipenv sync\fP, but \fBpipenv sync\fP will \fInever\fP attempt to re\-lock your dependencies as it is considered an atomic operation. \fBpipenv install\fP by default does attempt to re\-lock unless using the \fB\-\-deploy\fP flag.
.UNINDENT
.UNINDENT
+.sp
+You may wish only to verify that your \fBPipfile.lock\fP is up\-to\-date with the dependencies specified in the \fBPipfile\fP, without installing:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+$ pipenv verify
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+The command will perform a verification and return an exit code of \fB1\fP when dependency locking is needed. This may be useful for cases when the \fBPipfile.lock\fP file is subject to version control, so this command can be used within your CI/CD pipelines.
.SS Deploying System Dependencies
.sp
You can tell Pipenv to install a Pipfile\(aqs contents into its parent system with the \fB\-\-system\fP flag:
@@ -2863,9 +3366,10 @@ $ pipenv \-\-python=/path/to/python \-\-site\-packages
.UNINDENT
.SS ☤ Generating a \fBrequirements.txt\fP
.sp
+Sometimes you may want to generate a requirements file based on your current
+environment, for example for tooling that only supports requirements.txt.
You can convert a \fBPipfile\fP and \fBPipfile.lock\fP into a \fBrequirements.txt\fP
-file very easily, and get all the benefits of extras and other goodies we have
-included.
+file very easily.
.sp
Let\(aqs take this \fBPipfile\fP:
.INDENT 0.0
@@ -2893,7 +3397,8 @@ And generate a set of requirements out of it with only the default dependencies:
.sp
.nf
.ft C
-$ pipenv lock \-r
+$ pipenv requirements
+\-i https://pypi.org/simple
chardet==3.0.4
requests==2.18.4
certifi==2017.7.27.1
@@ -2911,7 +3416,8 @@ development dependencies:
.sp
.nf
.ft C
-$ pipenv lock \-r \-\-dev
+$ pipenv requirements \-\-dev
+\-i https://pypi.org/simple
chardet==3.0.4
requests==2.18.4
certifi==2017.7.27.1
@@ -2924,7 +3430,7 @@ pytest==3.2.3
.UNINDENT
.UNINDENT
.sp
-Finally, if you wish to generate a requirements file with only the
+If you wish to generate a requirements file with only the
development requirements you can do that too, using the \fB\-\-dev\-only\fP
flag:
.INDENT 0.0
@@ -2932,7 +3438,8 @@ flag:
.sp
.nf
.ft C
-$ pipenv lock \-r \-\-dev\-only
+$ pipenv requirements \-\-dev\-only
+\-i https://pypi.org/simple
py==1.4.34
pytest==3.2.3
.ft P
@@ -2940,6 +3447,9 @@ pytest==3.2.3
.UNINDENT
.UNINDENT
.sp
+Adding the \fB\-\-hash\fP flag adds package hashes to the output for extra security.
+Adding the \fB\-\-exclude\-markers\fP flag excludes the markers from the output.
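+.sp
+As a brief illustrative sketch, the flags documented above (\fB\-\-hash\fP, \fB\-\-dev\-only\fP and \fB\-\-exclude\-markers\fP) can be
+combined; the output file names here are arbitrary:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+# illustrative file names; flags as documented above
+$ pipenv requirements \-\-hash > requirements.txt
+$ pipenv requirements \-\-dev\-only \-\-exclude\-markers > dev\-requirements.txt
+.ft P
+.fi
+.UNINDENT
+.UNINDENT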
+.sp
The locked requirements are written to stdout, with shell output redirection
used to write them to a file:
.INDENT 0.0
@@ -2947,15 +3457,17 @@ used to write them to a file:
.sp
.nf
.ft C
-$ pipenv lock \-r > requirements.txt
-$ pipenv lock \-r \-\-dev\-only > dev\-requirements.txt
+$ pipenv requirements > requirements.txt
+$ pipenv requirements \-\-dev\-only > dev\-requirements.txt
$ cat requirements.txt
+\-i https://pypi.org/simple
chardet==3.0.4
requests==2.18.4
certifi==2017.7.27.1
idna==2.6
urllib3==1.22
$ cat dev\-requirements.txt
+\-i https://pypi.org/simple
py==1.4.34
pytest==3.2.3
.ft P
@@ -3021,7 +3533,7 @@ hardened for production use and should be used only as a development aid.
\fBNOTE:\fP
.INDENT 0.0
.INDENT 3.5
-Each month, \fIPyUp.io\fP updates the \fBsafety\fP database of
+Each month, \fI\%PyUp.io\fP updates the \fBsafety\fP database of
insecure Python packages and \fI\%makes it available to the
community for free\fP\&. Pipenv
makes an API call to retrieve those results and use them
@@ -3158,7 +3670,7 @@ Type "help", "copyright", "credits" or "license" for more information.
.UNINDENT
.UNINDENT
.sp
-Shell like variable expansion is available in \fB\&.env\fP files using \fI${VARNAME}\fP syntax.:
+Shell\-like variable expansion is available in \fB\&.env\fP files using \fB${VARNAME}\fP syntax:
.INDENT 0.0
.INDENT 3.5
.sp
@@ -3286,37 +3798,13 @@ variables. To activate them, simply create the variable in your shell and pipenv
will detect it.
.INDENT 0.0
.TP
-.B pipenv.environments.PIPENV_COLORBLIND = False
-If set, disable terminal colors.
-.sp
-Some people don\(aqt like colors in their terminals, for some reason. Default is
-to show colors.
-.UNINDENT
-.INDENT 0.0
-.TP
-.B pipenv.environments.PIPENV_HIDE_EMOJIS = False
+.B pipenv.environments.PIPENV_HIDE_EMOJIS = False
Disable emojis in output.
.sp
Default is to show emojis. This is automatically set on Windows.
.UNINDENT
.INDENT 0.0
.TP
-.B pipenv.environments.env_to_bool(val)
-Convert \fBval\fP to boolean, returning True if truthy or False if falsey
-.INDENT 7.0
-.TP
-.B Parameters
-\fBval\fP (\fIAny\fP) \-\- The value to convert
-.TP
-.B Returns
-False if Falsey, True if truthy
-.TP
-.B Return type
-bool
-.UNINDENT
-.UNINDENT
-.INDENT 0.0
-.TP
.B pipenv.environments.get_from_env(arg, prefix=\(aqPIPENV\(aq, check_for_negation=True)
Check the environment for a variable, returning its truthy or stringified value
.sp
@@ -3393,6 +3881,17 @@ export WORKON_HOME=~/.venvs
.UNINDENT
.sp
In addition, you can also have Pipenv stick the virtualenv in \fBproject/.venv\fP by setting the \fBPIPENV_VENV_IN_PROJECT\fP environment variable.
+.SS ☤ Virtual Environment Name
+.sp
+The virtualenv name created by Pipenv may be different from what you were expecting.
+Dangerous characters (i.e. \fB$\(ga!*@"\fP as well as space, line feed, carriage return,
+and tab) are converted to underscores. Additionally, the full path to the current
+folder is encoded into a "slug value" and appended to ensure the virtualenv name
+is unique.
+.sp
+Pipenv supports an arbitrary custom name for the virtual environment, set via \fBPIPENV_CUSTOM_VENV_NAME\fP\&.
+.sp
+The logical place to specify this would be in a user\(aqs \fB\&.env\fP file in the root of the project, which gets loaded by pipenv when it is invoked.
.SS ☤ Testing Projects
.sp
Pipenv is being used in projects like \fI\%Requests\fP for declaring development dependencies and running the test suite.
@@ -3535,8 +4034,8 @@ Magic shell completions are now enabled!
It\(aqs reasonably common for platform specific Python bindings for operating system interfaces to only be available through the system package manager, and hence unavailable for installation into virtual -environments with \fIpip\fP\&. In these cases, the virtual environment can -be created with access to the system \fIsite\-packages\fP directory: +environments with \fBpip\fP\&. In these cases, the virtual environment can +be created with access to the system \fBsite\-packages\fP directory: .INDENT 0.0 .INDENT 3.5 .sp @@ -3548,10 +4047,10 @@ $ pipenv \-\-three \-\-site\-packages .UNINDENT .UNINDENT .sp -To ensure that all \fIpip\fP\-installable components actually are installed +To ensure that all \fBpip\fP\-installable components actually are installed into the virtual environment and system packages are only used for interfaces that don\(aqt participate in Python\-level dependency resolution -at all, use the \fIPIP_IGNORE_INSTALLED\fP setting: +at all, use the \fBPIP_IGNORE_INSTALLED\fP setting: .INDENT 0.0 .INDENT 3.5 .sp @@ -3575,7 +4074,7 @@ To summarize: .IP \(bu 2 For libraries, define \fBabstract dependencies\fP via \fBinstall_requires\fP in \fBsetup.py\fP\&. The decision of which version exactly to be installed and where to obtain that dependency is not yours to make! .IP \(bu 2 -For applications, define \fBdependencies and where to get them\fP in the \fIPipfile\fP and use this file to update the set of \fBconcrete dependencies\fP in \fBPipfile.lock\fP\&. This file defines a specific idempotent environment that is known to work for your project. The \fBPipfile.lock\fP is your source of truth. The \fBPipfile\fP is a convenience for you to create that lock\-file, in that it allows you to still remain somewhat vague about the exact version of a dependency to be used. Pipenv is there to help you define a working conflict\-free set of specific dependency\-versions, which would otherwise be a very tedious task. +For applications, define \fBdependencies and where to get them\fP in the \fBPipfile\fP and use this file to update the set of \fBconcrete dependencies\fP in \fBPipfile.lock\fP\&. This file defines a specific idempotent environment that is known to work for your project. The \fBPipfile.lock\fP is your source of truth. The \fBPipfile\fP is a convenience for you to create that lock\-file, in that it allows you to still remain somewhat vague about the exact version of a dependency to be used. Pipenv is there to help you define a working conflict\-free set of specific dependency\-versions, which would otherwise be a very tedious task. .IP \(bu 2 Of course, \fBPipfile\fP and Pipenv are still useful for library developers, as they can be used to define a development or test environment. .IP \(bu 2 @@ -3600,8 +4099,251 @@ This will tell Pipenv to lock all your \fBsetup.py\fP–declared dependencies. You can force Pipenv to use a different cache location by setting the environment variable \fBPIPENV_CACHE_DIR\fP to the location you wish. This is useful in the same situations that you would change \fBPIP_CACHE_DIR\fP to a different directory. .SS ☤ Changing Default Python Versions .sp -By default, Pipenv will initialize a project using whatever version of python the python3 is. Besides starting a project with the \fB\-\-three\fP or \fB\-\-two\fP flags, you can also use \fBPIPENV_DEFAULT_PYTHON_VERSION\fP to specify what version to use when starting a project when \fB\-\-three\fP or \fB\-\-two\fP aren\(aqt used. 
+By default, Pipenv will initialize a project using whatever version of python the system has as default. Besides starting a project with the \fB\-\-python\fP or \fB\-\-three\fP flags, you can also use \fBPIPENV_DEFAULT_PYTHON_VERSION\fP to specify what version to use when starting a project when \fB\-\-python\fP or \fB\-\-three\fP aren\(aqt used. .SS Pipenv CLI Reference +.SS pipenv +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv [OPTIONS] COMMAND [ARGS]... +.ft P +.fi +.UNINDENT +.UNINDENT +.SS check +.sp +Checks for PyUp Safety security vulnerabilities and against PEP 508 markers provided in Pipfile. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv check [OPTIONS] +.ft P +.fi +.UNINDENT +.UNINDENT +.SS clean +.sp +Uninstalls all packages not specified in Pipfile.lock. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv clean [OPTIONS] +.ft P +.fi +.UNINDENT +.UNINDENT +.SS graph +.sp +Displays currently\-installed dependency graph information. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv graph [OPTIONS] +.ft P +.fi +.UNINDENT +.UNINDENT +.SS install +.sp +Installs provided packages and adds them to Pipfile, or (if no packages are given), installs all packages from Pipfile. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv install [OPTIONS] [PACKAGES]... +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +Environment variables +.INDENT 0.0 +.TP +.B PIPENV_SKIP_LOCK +.INDENT 7.0 +.INDENT 3.5 +Provide a default for \fB\-\-skip\-lock\fP +.UNINDENT +.UNINDENT +.UNINDENT +.INDENT 0.0 +.TP +.B PIP_INDEX_URL +.INDENT 7.0 +.INDENT 3.5 +Provide a default for \fB\-i\fP +.UNINDENT +.UNINDENT +.UNINDENT +.SS lock +.sp +Generates Pipfile.lock. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv lock [OPTIONS] +.ft P +.fi +.UNINDENT +.UNINDENT +.SS open +.sp +View a given module in your editor. +.sp +This uses the EDITOR environment variable. You can temporarily override it, +for example: +.INDENT 0.0 +.INDENT 3.5 +EDITOR=atom pipenv open requests +.UNINDENT +.UNINDENT +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv open [OPTIONS] MODULE +.ft P +.fi +.UNINDENT +.UNINDENT +.SS requirements +.sp +Generate a requirements.txt from Pipfile.lock. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv requirements [OPTIONS] +.ft P +.fi +.UNINDENT +.UNINDENT +.SS run +.sp +Spawns a command installed into the virtualenv. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv run [OPTIONS] COMMAND [ARGS]... +.ft P +.fi +.UNINDENT +.UNINDENT +.SS scripts +.sp +Lists scripts in current environment config. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv scripts [OPTIONS] +.ft P +.fi +.UNINDENT +.UNINDENT +.SS shell +.sp +Spawns a shell within the virtualenv. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv shell [OPTIONS] [SHELL_ARGS]... +.ft P +.fi +.UNINDENT +.UNINDENT +.SS sync +.sp +Installs all packages specified in Pipfile.lock. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv sync [OPTIONS] +.ft P +.fi +.UNINDENT +.UNINDENT +.SS uninstall +.sp +Uninstalls a provided package and removes it from Pipfile. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv uninstall [OPTIONS] [PACKAGES]... +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +Environment variables +.INDENT 0.0 +.TP +.B PIPENV_SKIP_LOCK +.INDENT 7.0 +.INDENT 3.5 +Provide a default for \fB\-\-skip\-lock\fP +.UNINDENT +.UNINDENT +.UNINDENT +.SS update +.sp +Runs lock, then sync. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv update [OPTIONS] [PACKAGES]... 
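+
+# For example (the package name here is purely illustrative),
+# update re\-locks and re\-syncs a single dependency:
+pipenv update requests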
+.ft P +.fi +.UNINDENT +.UNINDENT +.sp +Environment variables +.INDENT 0.0 +.TP +.B PIP_INDEX_URL +.INDENT 7.0 +.INDENT 3.5 +Provide a default for \fB\-i\fP +.UNINDENT +.UNINDENT +.UNINDENT +.SS verify +.sp +Verify the hash in Pipfile.lock is up\-to\-date. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pipenv verify [OPTIONS] +.ft P +.fi +.UNINDENT +.UNINDENT .SS Frequently Encountered Pipenv Problems .sp Pipenv is constantly being improved by volunteers, but is still a very young @@ -3665,7 +4407,7 @@ in your Pipfile. .sp This is usually a result of mixing Pipenv with system packages. We \fIstrongly\fP recommend installing Pipenv in an isolated environment. Uninstall all existing -Pipenv installations, and see installing\-pipenv to choose one of the +Pipenv installations, and see \fI\%☤ Installing Pipenv\fP to choose one of the recommended way to install Pipenv instead. .SS ☤ My pyenv\-installed Python is not found .sp @@ -3675,7 +4417,7 @@ distributions, with version name like \fB3.6.4\fP or similar. .sp Pipenv by default uses the Python it is installed against to create the virtualenv. You can set the \fB\-\-python\fP option to \fB$(pyenv which python)\fP -to use your current pyenv interpreter. See specifying_versions for more +to use your current pyenv interpreter. See \fI\%☤ Specifying Versions of a Package\fP for more information. .SS ☤ ValueError: unknown locale: UTF\-8 .sp @@ -3721,7 +4463,7 @@ for a possible solution. .SS ☤ Pipenv does not respect dependencies in setup.py .sp No, it does not, intentionally. Pipfile and setup.py serve different purposes, -and should not consider each other by default. See pipfile\-vs\-setuppy +and should not consider each other by default. See \fI\%☤ Pipfile vs setup.py\fP for more information. .SS ☤ Using \fBpipenv run\fP in Supervisor program .sp @@ -3747,33 +4489,6 @@ to speed up subsequent runs. The cache may contain faulty results if a bug causes the format to corrupt, even after the bug is fixed. \fB\-\-clear\fP flushes the cache, and therefore removes the bad results. .SH CONTRIBUTION GUIDES -.SS Development Philosophy -.sp -Pipenv is an open but opinionated tool, created by an open but opinionated developer. -.SS Management Style -.INDENT 0.0 -.INDENT 3.5 -\fBTo be updated (as of March 2020)\fP\&. -.UNINDENT -.UNINDENT -.sp -\fI\%Kenneth Reitz\fP is the BDFL. He has final say in any decision related to the Pipenv project. Kenneth is responsible for the direction and form of the library, as well as its presentation. In addition to making decisions based on technical merit, he is responsible for making decisions based on the development philosophy of Pipenv. -.sp -\fI\%Dan Ryan\fP, \fI\%Tzu\-ping Chung\fP, and \fI\%Nate Prewitt\fP are the core contributors. -They are responsible for triaging bug reports, reviewing pull requests and ensuring that Kenneth is kept up to speed with developments around the library. -The day\-to\-day managing of the project is done by the core contributors. They are responsible for making judgments about whether or not a feature request is -likely to be accepted by Kenneth. -.SS Values -.INDENT 0.0 -.IP \(bu 2 -Simplicity is always better than functionality. -.IP \(bu 2 -Listen to everyone, then disregard it. -.IP \(bu 2 -The API is all that matters. Everything else is secondary. -.IP \(bu 2 -Fit the 90% use\-case. Ignore the nay\-sayers. -.UNINDENT .SS Contributing to Pipenv .sp If you\(aqre reading this, you\(aqre probably interested in contributing to Pipenv. 
@@ -3783,9 +4498,7 @@ contributing to the Pipenv project is \fIvery\fP generous of you.
.sp
This document lays out guidelines and advice for contributing to this project.
If you\(aqre thinking of contributing, please start by reading this document and
-getting a feel for how contributing to this project works. If you have any
-questions, feel free to reach out to either \fI\%Dan Ryan\fP, \fI\%Tzu\-ping Chung\fP,
-or \fI\%Nate Prewitt\fP, the primary maintainers.
+getting a feel for how contributing to this project works.
.sp
The guide is split into sections based on the type of contribution you\(aqre
thinking of making, with a section that covers general guidelines for all
@@ -3800,7 +4513,7 @@ contributors.
.sp
Pipenv has one very important rule governing all forms of contribution,
including reporting bugs or requesting features. This golden rule is
-"\fI\%be cordial or be on your way\fP".
+"\fI\%be cordial or be on your way\fP"
.sp
\fBAll contributions are welcome\fP, as long as everyone involved is treated
with respect.
@@ -3861,25 +4574,24 @@ project. The following sub\-sections go into more detail on some of the points
above.
.SS Development Setup
.sp
-To get your development environment setup, run:
+The repository version of Pipenv must be installed over other global versions to
+resolve conflicts with the \fBpipenv\fP folder being implicitly added to \fBsys.path\fP\&.
+See \fI\%pypa/pipenv#2557\fP for more details.
+.sp
+Pipenv now uses pre\-commit hooks similar to Pip in order to apply linting and
+code formatting automatically! The build now also checks that these linting rules
+have been applied to the code before running the tests.
+The build will fail when linting changes are detected, so be sure to sync dev requirements
+and install the pre\-commit hooks locally:
.INDENT 0.0
.INDENT 3.5
-.sp
-.nf
-.ft C
-pip install \-e .
-pipenv install \-\-dev
-.ft P
-.fi
+$ \fBpipenv install \-\-dev\fP
+# This will configure running the pre\-commit checks at start of each commit
+$ \fBpre\-commit install\fP
+# Should you want to check the pre\-commit configuration against all configured project files
+$ \fBpre\-commit run \-\-all\-files \-\-verbose\fP
.UNINDENT
.UNINDENT
-.sp
-This will install the repository version of Pipenv and then install the development
-dependencies. Once that has completed, you can start developing.
-.sp
-The repository version of Pipenv must be installed over other global versions to
-resolve conflicts with the \fBpipenv\fP folder being implicitly added to \fBsys.path\fP\&.
-See \fI\%pypa/pipenv#2557\fP for more details.
.SS Testing
.sp
Tests are written in \fBpytest\fP style and can be run very simply:
@@ -3985,15 +4697,54 @@ will close your issue without fixing it.
.UNINDENT
.SS Run the tests
.sp
-Three ways of running the tests are as follows:
+There are a few ways of running the tests:
.INDENT 0.0
.IP 1. 3
-\fBmake test\fP (which uses \fBdocker\fP)
+run\-tests.sh
+.UNINDENT
+.sp
+The scripts for bash or Windows: \fB\&./run\-tests.sh\fP and \fBrun\-tests.bat\fP
+.sp
+Note that you can override the default Python Pipenv will use with
+PIPENV_PYTHON, and the Python binary name with PYTHON, in case it
+is not called \fBpython\fP on your system or in case you have many.
+Here is an example of how you can override both variables (you can also
+override just one):
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+$ PYTHON=python3.8 PIPENV_PYTHON=python3.9 run\-tests.sh
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+You can also do:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+$ PYTHON=/opt/python/python3.10/python3 run\-tests.sh
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+If you need to change how pytest is invoked, see how to run the
+test suite manually. The \fBrun\-tests.sh\fP script does the same
+steps the Github CI workflow does, and as such it is recommended
+you run it before you open a PR. Taking this second approach
+will allow you, for example, to run a single test case, or to
+\fBfail fast\fP if you need it.
+.INDENT 0.0
.IP 2. 3
-\fB\&./run\-tests.sh\fP or \fBrun\-tests.bat\fP
-.IP 3. 3
-Using pipenv:
+Manually
.UNINDENT
+.sp
+This repeats the steps of the scripts above:
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
$ git clone https://github.com/pypa/pipenv.git
$ cd pipenv
$ git submodule sync && git submodule update \-\-init \-\-recursive
$ pipenv install \-\-dev
-$ pipenv run pytest
+$ pipenv run pytest [\-\-any optional arguments to pytest]
.ft P
.fi
.UNINDENT
.UNINDENT
+.sp
+The second option assumes you already have \fBpipenv\fP on your system,
+and simply repeats all the steps in the scripts above.
+.sp
+Preferably, you should be running your tests in a Linux container
+(or FreeBSD jail or even a VM). This will guarantee that you don\(aqt break
+stuff, and that the tests run in a pristine environment.
+.sp
+Consider doing something like:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+$ docker run \-\-rm \-v $(pwd):/usr/src \-it python:3.7 bash
+# inside the container
+# adduser \-\-disabled\-password debian
+# su debian && cd /usr/src/
+# bash run\-tests.sh
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.INDENT 0.0
+.IP 3. 3
+Using the Makefile:
+.UNINDENT
+.sp
+The Makefile automates all the tasks as in the scripts. However, it allows
+more fine\-grained control over every step. For example:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+$ make ramdisk # create a ram disk to preserve your SSD\(aqs life
+$ make ramdisk\-virtualenv
+$ make test suite="\-m not cli" # run all tests but cli
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+or
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+$ make tests parallel="" suite="tests/integration/test_cli.py::test_pipenv_check"
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
.sp
-For the last two, it is important that your environment is setup correctly, and
+It is important that your environment is set up correctly, and
this may take some work, for example, on a specific Mac installation, the
following steps may be needed:
.INDENT 0.0
@@ -4020,7 +4819,7 @@ steps may be needed:
# Make sure the tests can access github
if [ "$SSH_AGENT_PID" = "" ]
then
-   eval \(gassh\-agent\(ga
+   eval \(ga\(gassh\-agent\(ga\(ga
   ssh\-add
fi

@@ -4041,13 +4840,13 @@ unset PIP_FIND_LINKS
.UNINDENT
.INDENT 0.0
.IP \(bu 2
-genindex
+\fI\%Index\fP
.IP \(bu 2
-modindex
+\fI\%Module Index\fP
.UNINDENT
.SH AUTHOR
Python Packaging Authority
.SH COPYRIGHT
-2020. A project founded by Kenneth Reitz
+2020. A project founded by Kenneth Reitz and maintained by Python Packaging Authority (PyPA).
.\" Generated by docutils manpage writer.
.

From b2a9bbaba62b2a4271681be871c7aa5d4b442a50 Mon Sep 17 00:00:00 2001
From: Matt Davis
Date: Fri, 5 Aug 2022 10:47:57 -0400
Subject: [PATCH 005/200] Apply linter.
--- pipenv/pipenv.1 | 10 +++++----- tasks/__init__.py | 1 - 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/pipenv/pipenv.1 b/pipenv/pipenv.1 index 436c3296dc..7fe1b40623 100644 --- a/pipenv/pipenv.1 +++ b/pipenv/pipenv.1 @@ -1344,28 +1344,28 @@ Update vendored dependencies and invocations .INDENT 2.0 .IP \(bu 2 Update vendored and patched dependencies -\- Update patches on \fBpiptools\fP, \fBpip\fP, \fBpip\-shims\fP, +\- Update patches on \fBpiptools\fP, \fBpip\fP, \fBpip\-shims\fP, .nf \(ga\(ga .fi tomlkit\(ga .IP \(bu 2 Fix invocations of dependencies -\- Fix custom +\- Fix custom .nf \(ga\(ga .fi InstallCommand\(ga instantiation -\- Update +\- Update .nf \(ga\(ga .fi PackageFinder\(ga usage -\- Fix +\- Fix .nf \(ga\(ga .fi -Bool\(ga stringify attempts from +Bool\(ga stringify attempts from .nf \(ga\(ga .fi diff --git a/tasks/__init__.py b/tasks/__init__.py index 27e9bd519f..961ec1ef2a 100644 --- a/tasks/__init__.py +++ b/tasks/__init__.py @@ -9,4 +9,3 @@ ROOT = Path(".").parent.parent.absolute() ns = invoke.Collection(vendoring, release, release.clean_mdchangelog) - From acea12460d866c5a90c5027653baf9461c55c2bc Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Fri, 5 Aug 2022 10:49:59 -0400 Subject: [PATCH 006/200] Mark dev version for main. --- pipenv/__version__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipenv/__version__.py b/pipenv/__version__.py index c981b3a0d3..0e1278e604 100644 --- a/pipenv/__version__.py +++ b/pipenv/__version__.py @@ -2,4 +2,4 @@ # // ) ) / / // ) ) //___) ) // ) ) || / / # //___/ / / / //___/ / // // / / || / / # // / / // ((____ // / / ||/ / -__version__ = "2022.8.5" +__version__ = "2022.8.5.dev" From 1fb3ae6bf3e8041a805bb9de086fa06c0ea90bfe Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Wed, 27 Jul 2022 10:00:18 -0400 Subject: [PATCH 007/200] Remove old way of generting requirements. --- pipenv/cli/command.py | 46 ++++++---------------------------- pipenv/cli/options.py | 39 ---------------------------- pipenv/core.py | 19 ++------------ tests/integration/test_lock.py | 26 +++++++++---------- 4 files changed, 21 insertions(+), 109 deletions(-) diff --git a/pipenv/cli/command.py b/pipenv/cli/command.py index 43d38e1eba..9a88bd11ae 100644 --- a/pipenv/cli/command.py +++ b/pipenv/cli/command.py @@ -23,7 +23,6 @@ uninstall_options, verbose_option, ) -from pipenv.exceptions import PipenvOptionsError from pipenv.utils.processes import subprocess_run from pipenv.vendor.click import ( Choice, @@ -330,47 +329,16 @@ def lock(ctx, state, **kwargs): warn=(not state.quiet), site_packages=state.site_packages, ) - emit_requirements = state.lockoptions.emit_requirements dev = state.installstate.dev dev_only = state.lockoptions.dev_only pre = state.installstate.pre - if emit_requirements: - secho( - "Warning: The lock flag -r/--requirements will be deprecated in a future version\n" - "of pipenv in favor of the new requirements command. For more info see\n" - "https://pipenv.pypa.io/en/latest/advanced/#generating-a-requirements-txt\n" - "NOTE: the requirements command parses Pipfile.lock directly without performing any\n" - "locking operations. 
Updating packages should be done by running pipenv lock.", - fg="yellow", - err=True, - ) - # Emit requirements file header (unless turned off with --no-header) - if state.lockoptions.emit_requirements_header: - header_options = ["--requirements"] - if dev_only: - header_options.append("--dev-only") - elif dev: - header_options.append("--dev") - echo(LOCK_HEADER.format(options=" ".join(header_options))) - # TODO: Emit pip-compile style header - if dev and not dev_only: - echo(LOCK_DEV_NOTE) - # Setting "emit_requirements=True" means do_init() just emits the - # install requirements file to stdout, it doesn't install anything - do_init( - state.project, - dev=dev, - dev_only=dev_only, - emit_requirements=emit_requirements, - pypi_mirror=state.pypi_mirror, - pre=pre, - ) - elif state.lockoptions.dev_only: - raise PipenvOptionsError( - "--dev-only", - "--dev-only is only permitted in combination with --requirements. " - "Aborting.", - ) + do_init( + state.project, + dev=dev, + dev_only=dev_only, + pypi_mirror=state.pypi_mirror, + pre=pre, + ) do_lock( state.project, ctx=ctx, diff --git a/pipenv/cli/options.py b/pipenv/cli/options.py index 110f37d847..7ab9479738 100644 --- a/pipenv/cli/options.py +++ b/pipenv/cli/options.py @@ -92,8 +92,6 @@ def __init__(self): class LockOptions: def __init__(self): self.dev_only = False - self.emit_requirements = False - self.emit_requirements_header = False pass_state = make_pass_decorator(State, ensure=True) @@ -460,41 +458,6 @@ def callback(ctx, param, value): )(f) -def emit_requirements_flag(f): - def callback(ctx, param, value): - state = ctx.ensure_object(State) - if value: - state.lockoptions.emit_requirements = value - return value - - return option( - "--requirements", - "-r", - default=False, - is_flag=True, - expose_value=False, - help="Generate output in requirements.txt format.", - callback=callback, - )(f) - - -def emit_requirements_header_flag(f): - def callback(ctx, param, value): - state = ctx.ensure_object(State) - if value: - state.lockoptions.emit_requirements_header = value - return value - - return option( - "--header/--no-header", - default=True, - is_flag=True, - expose_value=False, - help="Add header to generated requirements", - callback=callback, - )(f) - - def dev_only_flag(f): def callback(ctx, param, value): state = ctx.ensure_object(State) @@ -597,8 +560,6 @@ def uninstall_options(f): def lock_options(f): f = install_base_options(f) f = lock_dev_option(f) - f = emit_requirements_flag(f) - f = emit_requirements_header_flag(f) f = dev_only_flag(f) return f diff --git a/pipenv/core.py b/pipenv/core.py index 0c07964384..a9c05edc24 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -796,7 +796,6 @@ def do_install_dependencies( dev=False, dev_only=False, bare=False, - emit_requirements=False, allow_global=False, ignore_hashes=False, skip_lock=False, @@ -804,16 +803,13 @@ def do_install_dependencies( requirements_dir=None, pypi_mirror=None, ): - """ " - Executes the install functionality. + """ + Executes the installation functionality. - If emit_requirements is True, simply spits out a requirements format to stdout. """ import queue - if emit_requirements: - bare = True # Load the lockfile if it exists, or if dev_only is being used. 
if skip_lock or not project.lockfile_exists: if not bare: @@ -839,15 +835,6 @@ def do_install_dependencies( ) dev = dev or dev_only deps_list = list(lockfile.get_requirements(dev=dev, only=dev_only)) - if emit_requirements: - index_args = prepare_pip_source_args( - get_source_list(project, pypi_mirror=pypi_mirror) - ) - index_args = " ".join(index_args).replace(" -", "\n-") - deps = [req.as_line(sources=False, include_hashes=False) for req in deps_list] - click.echo(index_args) - click.echo("\n".join(sorted(deps))) - sys.exit(0) if concurrent: nprocs = project.s.PIPENV_MAX_SUBPROCESS else: @@ -1234,7 +1221,6 @@ def do_init( project, dev=False, dev_only=False, - emit_requirements=False, allow_global=False, ignore_pipfile=False, skip_lock=False, @@ -1341,7 +1327,6 @@ def do_init( project, dev=dev, dev_only=dev_only, - emit_requirements=emit_requirements, allow_global=allow_global, skip_lock=skip_lock, concurrent=concurrent, diff --git a/tests/integration/test_lock.py b/tests/integration/test_lock.py index ebaa7281d6..e08b61555b 100644 --- a/tests/integration/test_lock.py +++ b/tests/integration/test_lock.py @@ -46,16 +46,18 @@ def test_lock_requirements_file(PipenvInstance): dev_req_list = ("flask==0.12.2",) - c = p.pipenv('lock -r') - d = p.pipenv('lock -r -d') + c = p.pipenv('lock') assert c.returncode == 0 - assert d.returncode == 0 + + default = p.pipenv('requirements') + assert default.returncode == 0 + dev = p.pipenv('requirements --dev-only') for req in req_list: - assert req in c.stdout + assert req in default.stdout for req in dev_req_list: - assert req in d.stdout + assert req in dev.stdout @pytest.mark.lock @@ -306,7 +308,9 @@ def test_lock_extras_without_install(PipenvInstance): assert "pysocks" in p.lockfile["default"] assert "markers" not in p.lockfile["default"]['pysocks'] - c = p.pipenv('lock -r') + c = p.pipenv('lock') + assert c.returncode == 0 + c = p.pipenv('requirements') assert c.returncode == 0 assert "extra == 'socks'" not in c.stdout.strip() @@ -363,12 +367,8 @@ def test_private_index_lock_requirements(PipenvInstance_NoPyPI): requests = "*" """.strip() f.write(contents) - c = p.pipenv('install') - assert c.returncode == 0 - c = p.pipenv('lock -r') + c = p.pipenv('lock') assert c.returncode == 0 - assert '-i https://pypi.org/simple' in c.stdout.strip() - assert '--extra-index-url https://test.pypi.org/simple' in c.stdout.strip() @pytest.mark.lock @@ -400,9 +400,7 @@ def test_private_index_mirror_lock_requirements(PipenvInstance_NoPyPI): fake-package = "*" """.strip() f.write(contents) - c = p.pipenv(f'install --pypi-mirror {mirror_url}') - assert c.returncode == 0 - c = p.pipenv(f'lock -r --pypi-mirror {mirror_url}') + c = p.pipenv(f'install -v --pypi-mirror {mirror_url}') assert c.returncode == 0 assert f'-i {mirror_url}' in c.stdout.strip() assert '--extra-index-url https://test.pypi.org/simple' in c.stdout.strip() From 65a4f8de8bd0572cb369731c19b7abd89a5148d9 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Mon, 1 Aug 2022 21:00:22 -0400 Subject: [PATCH 008/200] test corrections. 
--- pipenv/cli/command.py | 11 +---------- tests/integration/test_lock.py | 8 ++------ 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/pipenv/cli/command.py b/pipenv/cli/command.py index 9a88bd11ae..069037590b 100644 --- a/pipenv/cli/command.py +++ b/pipenv/cli/command.py @@ -316,7 +316,7 @@ def uninstall(ctx, state, all_dev=False, all=False, **kwargs): @pass_context def lock(ctx, state, **kwargs): """Generates Pipfile.lock.""" - from ..core import do_init, do_lock, ensure_project + from ..core import do_lock, ensure_project # Ensure that virtualenv is available. # Note that we don't pass clear on to ensure_project as it is also @@ -329,16 +329,7 @@ def lock(ctx, state, **kwargs): warn=(not state.quiet), site_packages=state.site_packages, ) - dev = state.installstate.dev - dev_only = state.lockoptions.dev_only pre = state.installstate.pre - do_init( - state.project, - dev=dev, - dev_only=dev_only, - pypi_mirror=state.pypi_mirror, - pre=pre, - ) do_lock( state.project, ctx=ctx, diff --git a/tests/integration/test_lock.py b/tests/integration/test_lock.py index e08b61555b..0208c212bd 100644 --- a/tests/integration/test_lock.py +++ b/tests/integration/test_lock.py @@ -104,7 +104,7 @@ def test_lock_keep_outdated(PipenvInstance): contents = """ [packages] requests = {version = "==2.14.0"} -PyTest = "==3.1.0" +pytest = "==3.1.0" """.strip() f.write(contents) @@ -120,7 +120,7 @@ def test_lock_keep_outdated(PipenvInstance): updated_contents = """ [packages] requests = {version = "==2.18.4"} -PyTest = "*" +pytest = "*" """.strip() f.write(updated_contents) @@ -402,10 +402,6 @@ def test_private_index_mirror_lock_requirements(PipenvInstance_NoPyPI): f.write(contents) c = p.pipenv(f'install -v --pypi-mirror {mirror_url}') assert c.returncode == 0 - assert f'-i {mirror_url}' in c.stdout.strip() - assert '--extra-index-url https://test.pypi.org/simple' in c.stdout.strip() - assert f'--extra-index-url {mirror_url}' not in c.stdout.strip() - @pytest.mark.lock @pytest.mark.install From 6d7649304b9571b10277c5d8808525abf9f8ce0c Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Mon, 1 Aug 2022 21:05:16 -0400 Subject: [PATCH 009/200] Add news fragment. --- news/5200.removal.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 news/5200.removal.rst diff --git a/news/5200.removal.rst b/news/5200.removal.rst new file mode 100644 index 0000000000..13c8857553 --- /dev/null +++ b/news/5200.removal.rst @@ -0,0 +1 @@ +The deprecated way of generating requriements ``install -r`` or ``lock -r`` has been removed in favor of the ``pipenv requirements`` command. 
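For scripts that still depend on the removed flags, the replacement workflow is the ``requirements`` command exercised by the updated tests above. Roughly (the redirect targets are only examples):

    $ pipenv requirements > requirements.txt
    $ pipenv requirements --dev > dev-requirements.txt        # default plus dev packages
    $ pipenv requirements --dev-only > dev-requirements.txt   # dev packages only

Unlike the old ``lock -r`` flow, ``pipenv requirements`` parses ``Pipfile.lock`` directly and performs no locking, so run ``pipenv lock`` first whenever the lockfile may be stale.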
From 52c7a8cddd1ca44094b4e4fdee7e4c420b34684f Mon Sep 17 00:00:00 2001 From: Oz N Tiram Date: Sat, 6 Aug 2022 11:48:20 +0200 Subject: [PATCH 010/200] update README * Bump python version required * Add install instructions for gentoo * Update usage output from `pipenv --help` --- README.md | 141 +++++++++++++++++++++++++++++------------------------- 1 file changed, 77 insertions(+), 64 deletions(-) diff --git a/README.md b/README.md index d467892dc3..d36be97bbc 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ You can quickly play with Pipenv right in your browser: Installation ------------ -**Pipenv can be installed with Python 3.6 and above.** +**Pipenv can be installed with Python 3.7 and above.** If you\'re using Debian Buster+: @@ -51,7 +51,11 @@ Or, if you\'re using Fedora: Or, if you\'re using FreeBSD: - pkg install py36-pipenv + pkg install py39-pipenv + +Or, if you\'re using Gentoo: + + sudo emerge pipenv Or, if you\'re using Windows: @@ -142,68 +146,77 @@ Magic shell completions are now enabled! ☤ Usage ------- - $ pipenv - Usage: pipenv [OPTIONS] COMMAND [ARGS]... - - Options: - --where Output project home information. - --venv Output virtualenv information. - --py Output Python interpreter information. - --envs Output Environment Variable options. - --rm Remove the virtualenv. - --bare Minimal output. - --man Display manpage. - --three Use Python 3 when creating virtualenv. - --python TEXT Specify which version of Python virtualenv should use. - --site-packages Enable site-packages for the virtualenv. - --version Show the version and exit. - -h, --help Show this message and exit. - - - Usage Examples: - Create a new project using Python 3.9, specifically: - $ pipenv --python 3.9 - - Remove project virtualenv (inferred from current directory): - $ pipenv --rm - - Install all dependencies for a project (including dev): - $ pipenv install --dev - - Create a lockfile containing pre-releases: - $ pipenv lock --pre - - Show a graph of your installed dependencies: - $ pipenv graph - - Check your installed dependencies for security vulnerabilities: - $ pipenv check - - Install a local setup.py into your virtual environment/Pipfile: - $ pipenv install -e . - - Use a lower-level pip command: - $ pipenv run pip freeze - - Generate a requirements.txt file (including dev): - $ pipenv requirements --dev > requirements.txt - - Commands: - check Checks for security vulnerabilities and against PEP 508 markers - provided in Pipfile. - clean Uninstalls all packages not specified in Pipfile.lock. - graph Displays currently–installed dependency graph information. - install Installs provided packages and adds them to Pipfile, or (if no - packages are given), installs all packages from Pipfile. - lock Generates Pipfile.lock. - open View a given module in your editor. - run Spawns a command installed into the virtualenv. - scripts Displays the shortcuts in the (optional) [scripts] section of - Pipfile. - shell Spawns a shell within the virtualenv. - sync Installs all packages specified in Pipfile.lock. - requirements Generates a requirements.txt compatible output directly from Pipfile.lock - uninstall Un-installs a provided package and removes it from Pipfile. + $ pipenv --help + Usage: pipenv [OPTIONS] COMMAND [ARGS]... + + Options: + --where Output project home information. + --venv Output virtualenv information. + --py Output Python interpreter information. + --envs Output Environment Variable options. + --rm Remove the virtualenv. + --bare Minimal output. + --man Display manpage. 
+ --support Output diagnostic information for use in
+ GitHub issues.
+ --site-packages / --no-site-packages
+ Enable site-packages for the virtualenv.
+ [env var: PIPENV_SITE_PACKAGES]
+ --python TEXT Specify which version of Python virtualenv
+ should use.
+ --three Use Python 3 when creating virtualenv.
+ --clear Clears caches (pipenv, pip). [env var:
+ PIPENV_CLEAR]
+ -q, --quiet Quiet mode.
+ -v, --verbose Verbose mode.
+ --pypi-mirror TEXT Specify a PyPI mirror.
+ --version Show the version and exit.
+ -h, --help Show this message and exit.
+
+
+ Usage Examples:
+ Create a new project using Python 3.7, specifically:
+ $ pipenv --python 3.7
+
+ Remove project virtualenv (inferred from current directory):
+ $ pipenv --rm
+
+ Install all dependencies for a project (including dev):
+ $ pipenv install --dev
+
+ Create a lockfile containing pre-releases:
+ $ pipenv lock --pre
+
+ Show a graph of your installed dependencies:
+ $ pipenv graph
+
+ Check your installed dependencies for security vulnerabilities:
+ $ pipenv check
+
+ Install a local setup.py into your virtual environment/Pipfile:
+ $ pipenv install -e .
+
+ Use a lower-level pip command:
+ $ pipenv run pip freeze
+
+ Commands:
+ check Checks for PyUp Safety security vulnerabilities and against
+ PEP 508 markers provided in Pipfile.
+ clean Uninstalls all packages not specified in Pipfile.lock.
+ graph Displays currently-installed dependency graph information.
+ install Installs provided packages and adds them to Pipfile, or (if no
+ packages are given), installs all packages from Pipfile.
+ lock Generates Pipfile.lock.
+ open View a given module in your editor.
+ requirements Generate a requirements.txt from Pipfile.lock.
+ run Spawns a command installed into the virtualenv.
+ scripts Lists scripts in current environment config.
+ shell Spawns a shell within the virtualenv.
+ sync Installs all packages specified in Pipfile.lock.
+ uninstall Uninstalls a provided package and removes it from Pipfile.
+ update Runs lock, then sync.
+ verify Verify the hash in Pipfile.lock is up-to-date.
+
 Locate the project:

From 96f4c02e6d7a548796a12ee3a7019399ec5958ed Mon Sep 17 00:00:00 2001
From: Matt Davis
Date: Fri, 5 Aug 2022 18:37:28 -0400
Subject: [PATCH 011/200] Improve handling of CI variable for build systems that set it to a string like Azure.

---
 pipenv/environments.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/pipenv/environments.py b/pipenv/environments.py
index 5b5e49f4e2..ed0d1664ad 100644
--- a/pipenv/environments.py
+++ b/pipenv/environments.py
@@ -81,7 +81,15 @@ def normalize_pipfile_path(p):
 os.environ.pop("__PYVENV_LAUNCHER__", None)
 # Internal, to tell whether the command line session is interactive.
 SESSION_IS_INTERACTIVE = _isatty(sys.stdout)
-PIPENV_IS_CI = env_to_bool(os.environ.get("CI") or os.environ.get("TF_BUILD") or False)
+
+# TF_BUILD indicates to Azure pipelines it is a build step
+PIPENV_IS_CI = os.environ.get("CI") or os.environ.get("TF_BUILD")
+try:
+    PIPENV_IS_CI = env_to_bool(PIPENV_IS_CI)
+except ValueError:
+    # CI variable detected and it did not evaluate to a false value
+    PIPENV_IS_CI = True
+
 NO_COLOR = False
 if os.getenv("NO_COLOR") or os.getenv("PIPENV_COLORBLIND"):
     NO_COLOR = True

From ca54a6f08c43bedc643262c305f40075a95ded9e Mon Sep 17 00:00:00 2001
From: Matt Davis
Date: Fri, 5 Aug 2022 18:41:52 -0400
Subject: [PATCH 012/200] Add news fragment.
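The news fragment added below documents the behavior change from the previous commit, which the next two commits refine into a small truthiness helper. A minimal sketch of the intended rule (the exact set of false-like strings is an assumption here; the canonical helper is ``_is_env_truthy`` in ``pipenv/environments.py``):

    import os

    def is_env_truthy(name):
        # The variable counts as true whenever it is set to anything that is
        # not an explicit false-like value, so CI systems exporting e.g.
        # CI="azure" or TF_BUILD="True" are still detected.
        value = os.environ.get(name)
        if value is None:
            return False
        return value.strip().lower() not in ("", "0", "false", "no", "off")

    PIPENV_IS_CI = is_env_truthy("CI") or is_env_truthy("TF_BUILD")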
--- news/5128.bugfix.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 news/5128.bugfix.rst diff --git a/news/5128.bugfix.rst b/news/5128.bugfix.rst new file mode 100644 index 0000000000..120ce82e56 --- /dev/null +++ b/news/5128.bugfix.rst @@ -0,0 +1 @@ +If environment variable ``CI`` or ``TF_BUILD`` is set but does not evaluate to ``False`` it is now treated as ``True``. From 3aa96bef75c6030b5f766518b8d8bfe4d6c22867 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 6 Aug 2022 09:17:00 -0400 Subject: [PATCH 013/200] refactor based on PR feedback. --- pipenv/environments.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pipenv/environments.py b/pipenv/environments.py index ed0d1664ad..1a392d3fd4 100644 --- a/pipenv/environments.py +++ b/pipenv/environments.py @@ -83,12 +83,8 @@ def normalize_pipfile_path(p): SESSION_IS_INTERACTIVE = _isatty(sys.stdout) # TF_BUILD indicates to Azure pipelines it is a build step -PIPENV_IS_CI = os.environ.get("CI") or os.environ.get("TF_BUILD") -try: - PIPENV_IS_CI = env_to_bool(PIPENV_IS_CI) -except ValueError: - # CI variable detected and it did not evaluate to a false value - PIPENV_IS_CI = True +PIPENV_IS_CI = _is_env_truthy(os.environ.get("CI") or os.environ.get("TF_BUILD")) + NO_COLOR = False if os.getenv("NO_COLOR") or os.getenv("PIPENV_COLORBLIND"): From b5d9cf7799de928fc923931dfb5b62d697646ba2 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 6 Aug 2022 09:25:37 -0400 Subject: [PATCH 014/200] refactor based on PR feedback. --- pipenv/environments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipenv/environments.py b/pipenv/environments.py index 1a392d3fd4..7f1eb8bfae 100644 --- a/pipenv/environments.py +++ b/pipenv/environments.py @@ -83,7 +83,7 @@ def normalize_pipfile_path(p): SESSION_IS_INTERACTIVE = _isatty(sys.stdout) # TF_BUILD indicates to Azure pipelines it is a build step -PIPENV_IS_CI = _is_env_truthy(os.environ.get("CI") or os.environ.get("TF_BUILD")) +PIPENV_IS_CI = _is_env_truthy("CI") or _is_env_truthy("TF_BUILD") NO_COLOR = False From d71e89d6b5f711689fb56509746067fbea063704 Mon Sep 17 00:00:00 2001 From: amir Date: Sat, 6 Aug 2022 14:32:12 +0200 Subject: [PATCH 015/200] fix bug when auto completing install and uninstall --- pipenv/cli/options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipenv/cli/options.py b/pipenv/cli/options.py index 7ab9479738..7018c898bf 100644 --- a/pipenv/cli/options.py +++ b/pipenv/cli/options.py @@ -281,7 +281,7 @@ def callback(ctx, param, value): "packages", nargs=-1, callback=callback, - expose_value=False, + expose_value=True, type=click_types.STRING, )(f) From 290ba9dd55f4971f1ed9a547ae7de37ea21bf1b6 Mon Sep 17 00:00:00 2001 From: amir Date: Sat, 6 Aug 2022 15:34:26 +0200 Subject: [PATCH 016/200] add news of the bugfix --- news/5214.bugfix.rst | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 news/5214.bugfix.rst diff --git a/news/5214.bugfix.rst b/news/5214.bugfix.rst new file mode 100644 index 0000000000..3bb5f6a143 --- /dev/null +++ b/news/5214.bugfix.rst @@ -0,0 +1,2 @@ +Fix auto-complete crashing on 'install' and 'uninstall' keywords + From b4f7b15120a8be20274d4c9fa1c3f54e37f91971 Mon Sep 17 00:00:00 2001 From: amir Date: Sat, 6 Aug 2022 20:03:35 +0200 Subject: [PATCH 017/200] run pre-commit and apply the corresponding linter fixes --- news/5214.bugfix.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/news/5214.bugfix.rst b/news/5214.bugfix.rst index 3bb5f6a143..601dc83564 100644 --- 
a/news/5214.bugfix.rst +++ b/news/5214.bugfix.rst @@ -1,2 +1 @@ Fix auto-complete crashing on 'install' and 'uninstall' keywords - From f8215cc149872239218aaf5def84a3baf57dc2f3 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 6 Aug 2022 10:40:21 -0400 Subject: [PATCH 018/200] Convert this test off pip-shims, it became flakey recently. --- pipenv/utils/dependencies.py | 5 ++--- tests/unit/test_utils.py | 21 ++++++--------------- 2 files changed, 8 insertions(+), 18 deletions(-) diff --git a/pipenv/utils/dependencies.py b/pipenv/utils/dependencies.py index a1f70cbc3c..b4ca1a7028 100644 --- a/pipenv/utils/dependencies.py +++ b/pipenv/utils/dependencies.py @@ -1,5 +1,6 @@ import os from contextlib import contextmanager +from tempfile import NamedTemporaryFile from typing import Mapping, Sequence from pipenv.patched.pip._vendor.packaging.markers import Marker @@ -273,9 +274,7 @@ def convert_deps_to_pip( return dependencies # Write requirements.txt to tmp directory. - from pipenv.vendor.vistir.path import create_tracked_tempfile - - f = create_tracked_tempfile(suffix="-requirements.txt", delete=False) + f = NamedTemporaryFile(suffix="-requirements.txt", delete=False) f.write("\n".join(dependencies).encode("utf-8")) f.close() return f.name diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index bc7eda9c48..04df666357 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -1,4 +1,5 @@ import os +from unittest import mock import pytest @@ -83,13 +84,11 @@ def mock_unpack(link, source_dir, download_dir, only_download=False, session=Non @pytest.mark.utils @pytest.mark.parametrize("deps, expected", DEP_PIP_PAIRS) @pytest.mark.needs_internet -def test_convert_deps_to_pip(monkeypatch, deps, expected): - with monkeypatch.context() as m: - from pipenv.vendor import pip_shims - m.setattr(pip_shims.shims, "unpack_url", mock_unpack) - if expected.startswith("Django"): - expected = expected.lower() - assert dependencies.convert_deps_to_pip(deps, r=False) == [expected] +@mock.patch("pipenv.patched.pip._internal.operations.prepare.unpack_url", mock_unpack) +def test_convert_deps_to_pip(deps, expected): + if expected.startswith("Django"): + expected = expected.lower() + assert dependencies.convert_deps_to_pip(deps, r=False) == [expected] @pytest.mark.utils @@ -134,14 +133,6 @@ def test_convert_deps_to_pip_one_way(deps, expected): assert dependencies.convert_deps_to_pip(deps, r=False) == [expected.lower()] -@pytest.mark.skipif(isinstance("", str), reason="don't need to test if unicode is str") -@pytest.mark.utils -def test_convert_deps_to_pip_unicode(): - deps = {"django": "==1.10"} - deps = dependencies.convert_deps_to_pip(deps, r=False) - assert deps[0] == "django==1.10" - - @pytest.mark.parametrize("line,result", [ ("-i https://example.com/simple/", ("https://example.com/simple/", None, None, [])), ("--extra-index-url=https://example.com/simple/", (None, "https://example.com/simple/", None, [])), From 49087c78f59b004bb9f4140cb75101c24b0f4a9b Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 6 Aug 2022 10:43:51 -0400 Subject: [PATCH 019/200] Add news fragment. --- news/5226.trivial.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 news/5226.trivial.rst diff --git a/news/5226.trivial.rst b/news/5226.trivial.rst new file mode 100644 index 0000000000..8937f4dcfa --- /dev/null +++ b/news/5226.trivial.rst @@ -0,0 +1 @@ +Modernize the test ``test_convert_deps_to_pip`` to not use ``pip-shims`` and the code it calls to not use ``vistir``. 
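The stdlib pattern the modernized ``convert_deps_to_pip`` now relies on can be sketched standalone as follows (the dependency pins are illustrative only):

    from tempfile import NamedTemporaryFile

    dependencies = ["requests==2.28.1", "flask==2.2.1"]  # illustrative pins

    # delete=False keeps the file on disk after close() so that its path can
    # be handed to pip afterwards; cleanup is left to the caller, whereas the
    # removed vistir helper tracked the temporary file for cleanup itself.
    f = NamedTemporaryFile(suffix="-requirements.txt", delete=False)
    f.write("\n".join(dependencies).encode("utf-8"))
    f.close()
    print(f.name)  # e.g. /tmp/tmpXXXXXXXX-requirements.txt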
From 8f3d6d48b3bf46b9f0efd099610991f5b19b46f6 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Mon, 1 Aug 2022 21:07:20 -0400 Subject: [PATCH 020/200] Add missing vendoring files that get generated when running vendoring. --- .../pip/_vendor/cachecontrol/LICENSE.txt | 13 + .../2021-05-14-16-06-02.bpo-44095.v_pLwY.rst | 2 + pipenv/vendor/markupsafe/_speedups.c | 339 ++++++++++++++++++ 3 files changed, 354 insertions(+) create mode 100644 pipenv/patched/pip/_vendor/cachecontrol/LICENSE.txt create mode 100644 pipenv/vendor/Misc/NEWS.d/next/Library/2021-05-14-16-06-02.bpo-44095.v_pLwY.rst create mode 100644 pipenv/vendor/markupsafe/_speedups.c diff --git a/pipenv/patched/pip/_vendor/cachecontrol/LICENSE.txt b/pipenv/patched/pip/_vendor/cachecontrol/LICENSE.txt new file mode 100644 index 0000000000..d8b3b56d36 --- /dev/null +++ b/pipenv/patched/pip/_vendor/cachecontrol/LICENSE.txt @@ -0,0 +1,13 @@ +Copyright 2012-2021 Eric Larson + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/pipenv/vendor/Misc/NEWS.d/next/Library/2021-05-14-16-06-02.bpo-44095.v_pLwY.rst b/pipenv/vendor/Misc/NEWS.d/next/Library/2021-05-14-16-06-02.bpo-44095.v_pLwY.rst new file mode 100644 index 0000000000..ee03e933f3 --- /dev/null +++ b/pipenv/vendor/Misc/NEWS.d/next/Library/2021-05-14-16-06-02.bpo-44095.v_pLwY.rst @@ -0,0 +1,2 @@ +:class:`zipfile.Path` now supports :attr:`zipfile.Path.stem`, +:attr:`zipfile.Path.suffixes`, and :attr:`zipfile.Path.suffix` attributes. 
diff --git a/pipenv/vendor/markupsafe/_speedups.c b/pipenv/vendor/markupsafe/_speedups.c new file mode 100644 index 0000000000..44967b1fdc --- /dev/null +++ b/pipenv/vendor/markupsafe/_speedups.c @@ -0,0 +1,339 @@ +#include + +static PyObject* markup; + +static int +init_constants(void) +{ + PyObject *module; + + /* import markup type so that we can mark the return value */ + module = PyImport_ImportModule("markupsafe"); + if (!module) + return 0; + markup = PyObject_GetAttrString(module, "Markup"); + Py_DECREF(module); + + return 1; +} + +#define GET_DELTA(inp, inp_end, delta) \ + while (inp < inp_end) { \ + switch (*inp++) { \ + case '"': \ + case '\'': \ + case '&': \ + delta += 4; \ + break; \ + case '<': \ + case '>': \ + delta += 3; \ + break; \ + } \ + } + +#define DO_ESCAPE(inp, inp_end, outp) \ + { \ + Py_ssize_t ncopy = 0; \ + while (inp < inp_end) { \ + switch (*inp) { \ + case '"': \ + memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \ + outp += ncopy; ncopy = 0; \ + *outp++ = '&'; \ + *outp++ = '#'; \ + *outp++ = '3'; \ + *outp++ = '4'; \ + *outp++ = ';'; \ + break; \ + case '\'': \ + memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \ + outp += ncopy; ncopy = 0; \ + *outp++ = '&'; \ + *outp++ = '#'; \ + *outp++ = '3'; \ + *outp++ = '9'; \ + *outp++ = ';'; \ + break; \ + case '&': \ + memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \ + outp += ncopy; ncopy = 0; \ + *outp++ = '&'; \ + *outp++ = 'a'; \ + *outp++ = 'm'; \ + *outp++ = 'p'; \ + *outp++ = ';'; \ + break; \ + case '<': \ + memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \ + outp += ncopy; ncopy = 0; \ + *outp++ = '&'; \ + *outp++ = 'l'; \ + *outp++ = 't'; \ + *outp++ = ';'; \ + break; \ + case '>': \ + memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \ + outp += ncopy; ncopy = 0; \ + *outp++ = '&'; \ + *outp++ = 'g'; \ + *outp++ = 't'; \ + *outp++ = ';'; \ + break; \ + default: \ + ncopy++; \ + } \ + inp++; \ + } \ + memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \ + } + +static PyObject* +escape_unicode_kind1(PyUnicodeObject *in) +{ + Py_UCS1 *inp = PyUnicode_1BYTE_DATA(in); + Py_UCS1 *inp_end = inp + PyUnicode_GET_LENGTH(in); + Py_UCS1 *outp; + PyObject *out; + Py_ssize_t delta = 0; + + GET_DELTA(inp, inp_end, delta); + if (!delta) { + Py_INCREF(in); + return (PyObject*)in; + } + + out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, + PyUnicode_IS_ASCII(in) ? 
127 : 255); + if (!out) + return NULL; + + inp = PyUnicode_1BYTE_DATA(in); + outp = PyUnicode_1BYTE_DATA(out); + DO_ESCAPE(inp, inp_end, outp); + return out; +} + +static PyObject* +escape_unicode_kind2(PyUnicodeObject *in) +{ + Py_UCS2 *inp = PyUnicode_2BYTE_DATA(in); + Py_UCS2 *inp_end = inp + PyUnicode_GET_LENGTH(in); + Py_UCS2 *outp; + PyObject *out; + Py_ssize_t delta = 0; + + GET_DELTA(inp, inp_end, delta); + if (!delta) { + Py_INCREF(in); + return (PyObject*)in; + } + + out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 65535); + if (!out) + return NULL; + + inp = PyUnicode_2BYTE_DATA(in); + outp = PyUnicode_2BYTE_DATA(out); + DO_ESCAPE(inp, inp_end, outp); + return out; +} + + +static PyObject* +escape_unicode_kind4(PyUnicodeObject *in) +{ + Py_UCS4 *inp = PyUnicode_4BYTE_DATA(in); + Py_UCS4 *inp_end = inp + PyUnicode_GET_LENGTH(in); + Py_UCS4 *outp; + PyObject *out; + Py_ssize_t delta = 0; + + GET_DELTA(inp, inp_end, delta); + if (!delta) { + Py_INCREF(in); + return (PyObject*)in; + } + + out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 1114111); + if (!out) + return NULL; + + inp = PyUnicode_4BYTE_DATA(in); + outp = PyUnicode_4BYTE_DATA(out); + DO_ESCAPE(inp, inp_end, outp); + return out; +} + +static PyObject* +escape_unicode(PyUnicodeObject *in) +{ + if (PyUnicode_READY(in)) + return NULL; + + switch (PyUnicode_KIND(in)) { + case PyUnicode_1BYTE_KIND: + return escape_unicode_kind1(in); + case PyUnicode_2BYTE_KIND: + return escape_unicode_kind2(in); + case PyUnicode_4BYTE_KIND: + return escape_unicode_kind4(in); + } + assert(0); /* shouldn't happen */ + return NULL; +} + +static PyObject* +escape(PyObject *self, PyObject *text) +{ + static PyObject *id_html; + PyObject *s = NULL, *rv = NULL, *html; + + if (id_html == NULL) { + id_html = PyUnicode_InternFromString("__html__"); + if (id_html == NULL) { + return NULL; + } + } + + /* we don't have to escape integers, bools or floats */ + if (PyLong_CheckExact(text) || + PyFloat_CheckExact(text) || PyBool_Check(text) || + text == Py_None) + return PyObject_CallFunctionObjArgs(markup, text, NULL); + + /* if the object has an __html__ method that performs the escaping */ + html = PyObject_GetAttr(text ,id_html); + if (html) { + s = PyObject_CallObject(html, NULL); + Py_DECREF(html); + if (s == NULL) { + return NULL; + } + /* Convert to Markup object */ + rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL); + Py_DECREF(s); + return rv; + } + + /* otherwise make the object unicode if it isn't, then escape */ + PyErr_Clear(); + if (!PyUnicode_Check(text)) { + PyObject *unicode = PyObject_Str(text); + if (!unicode) + return NULL; + s = escape_unicode((PyUnicodeObject*)unicode); + Py_DECREF(unicode); + } + else + s = escape_unicode((PyUnicodeObject*)text); + + /* convert the unicode string into a markup object. */ + rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL); + Py_DECREF(s); + return rv; +} + + +static PyObject* +escape_silent(PyObject *self, PyObject *text) +{ + if (text != Py_None) + return escape(self, text); + return PyObject_CallFunctionObjArgs(markup, NULL); +} + + +static PyObject* +soft_str(PyObject *self, PyObject *s) +{ + if (!PyUnicode_Check(s)) + return PyObject_Str(s); + Py_INCREF(s); + return s; +} + + +static PyObject* +soft_unicode(PyObject *self, PyObject *s) +{ + PyErr_WarnEx( + PyExc_DeprecationWarning, + "'soft_unicode' has been renamed to 'soft_str'. 
The old name" + " will be removed in MarkupSafe 2.1.", + 2 + ); + return soft_str(self, s); +} + + +static PyMethodDef module_methods[] = { + { + "escape", + (PyCFunction)escape, + METH_O, + "Replace the characters ``&``, ``<``, ``>``, ``'``, and ``\"`` in" + " the string with HTML-safe sequences. Use this if you need to display" + " text that might contain such characters in HTML.\n\n" + "If the object has an ``__html__`` method, it is called and the" + " return value is assumed to already be safe for HTML.\n\n" + ":param s: An object to be converted to a string and escaped.\n" + ":return: A :class:`Markup` string with the escaped text.\n" + }, + { + "escape_silent", + (PyCFunction)escape_silent, + METH_O, + "Like :func:`escape` but treats ``None`` as the empty string." + " Useful with optional values, as otherwise you get the string" + " ``'None'`` when the value is ``None``.\n\n" + ">>> escape(None)\n" + "Markup('None')\n" + ">>> escape_silent(None)\n" + "Markup('')\n" + }, + { + "soft_str", + (PyCFunction)soft_str, + METH_O, + "Convert an object to a string if it isn't already. This preserves" + " a :class:`Markup` string rather than converting it back to a basic" + " string, so it will still be marked as safe and won't be escaped" + " again.\n\n" + ">>> value = escape(\"\")\n" + ">>> value\n" + "Markup('<User 1>')\n" + ">>> escape(str(value))\n" + "Markup('&lt;User 1&gt;')\n" + ">>> escape(soft_str(value))\n" + "Markup('<User 1>')\n" + }, + { + "soft_unicode", + (PyCFunction)soft_unicode, + METH_O, + "" + }, + {NULL, NULL, 0, NULL} /* Sentinel */ +}; + +static struct PyModuleDef module_definition = { + PyModuleDef_HEAD_INIT, + "markupsafe._speedups", + NULL, + -1, + module_methods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC +PyInit__speedups(void) +{ + if (!init_constants()) + return NULL; + + return PyModule_Create(&module_definition); +} From 0c90f78d356c4e0ffa0dc6d064e252787cb0487a Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 6 Aug 2022 21:08:05 -0400 Subject: [PATCH 021/200] PR feedback. --- .gitignore | 4 + .../2021-05-14-16-06-02.bpo-44095.v_pLwY.rst | 2 - pipenv/vendor/markupsafe/_speedups.c | 339 ------------------ 3 files changed, 4 insertions(+), 341 deletions(-) delete mode 100644 pipenv/vendor/Misc/NEWS.d/next/Library/2021-05-14-16-06-02.bpo-44095.v_pLwY.rst delete mode 100644 pipenv/vendor/markupsafe/_speedups.c diff --git a/.gitignore b/.gitignore index 1420349252..a3d9ab5e0e 100644 --- a/.gitignore +++ b/.gitignore @@ -168,3 +168,7 @@ prime/ stage/ pip-wheel-metadata/ .vim/ + +# Vendoring Files that generate but we don't want +pipenv/vendor/Misc/NEWS.d/next/Library/2021-05-14-16-06-02.bpo-44095.v_pLwY.rst +pipenv/vendor/markupsafe/_speedups.c diff --git a/pipenv/vendor/Misc/NEWS.d/next/Library/2021-05-14-16-06-02.bpo-44095.v_pLwY.rst b/pipenv/vendor/Misc/NEWS.d/next/Library/2021-05-14-16-06-02.bpo-44095.v_pLwY.rst deleted file mode 100644 index ee03e933f3..0000000000 --- a/pipenv/vendor/Misc/NEWS.d/next/Library/2021-05-14-16-06-02.bpo-44095.v_pLwY.rst +++ /dev/null @@ -1,2 +0,0 @@ -:class:`zipfile.Path` now supports :attr:`zipfile.Path.stem`, -:attr:`zipfile.Path.suffixes`, and :attr:`zipfile.Path.suffix` attributes. 
diff --git a/pipenv/vendor/markupsafe/_speedups.c b/pipenv/vendor/markupsafe/_speedups.c deleted file mode 100644 index 44967b1fdc..0000000000 --- a/pipenv/vendor/markupsafe/_speedups.c +++ /dev/null @@ -1,339 +0,0 @@ -#include - -static PyObject* markup; - -static int -init_constants(void) -{ - PyObject *module; - - /* import markup type so that we can mark the return value */ - module = PyImport_ImportModule("markupsafe"); - if (!module) - return 0; - markup = PyObject_GetAttrString(module, "Markup"); - Py_DECREF(module); - - return 1; -} - -#define GET_DELTA(inp, inp_end, delta) \ - while (inp < inp_end) { \ - switch (*inp++) { \ - case '"': \ - case '\'': \ - case '&': \ - delta += 4; \ - break; \ - case '<': \ - case '>': \ - delta += 3; \ - break; \ - } \ - } - -#define DO_ESCAPE(inp, inp_end, outp) \ - { \ - Py_ssize_t ncopy = 0; \ - while (inp < inp_end) { \ - switch (*inp) { \ - case '"': \ - memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \ - outp += ncopy; ncopy = 0; \ - *outp++ = '&'; \ - *outp++ = '#'; \ - *outp++ = '3'; \ - *outp++ = '4'; \ - *outp++ = ';'; \ - break; \ - case '\'': \ - memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \ - outp += ncopy; ncopy = 0; \ - *outp++ = '&'; \ - *outp++ = '#'; \ - *outp++ = '3'; \ - *outp++ = '9'; \ - *outp++ = ';'; \ - break; \ - case '&': \ - memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \ - outp += ncopy; ncopy = 0; \ - *outp++ = '&'; \ - *outp++ = 'a'; \ - *outp++ = 'm'; \ - *outp++ = 'p'; \ - *outp++ = ';'; \ - break; \ - case '<': \ - memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \ - outp += ncopy; ncopy = 0; \ - *outp++ = '&'; \ - *outp++ = 'l'; \ - *outp++ = 't'; \ - *outp++ = ';'; \ - break; \ - case '>': \ - memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \ - outp += ncopy; ncopy = 0; \ - *outp++ = '&'; \ - *outp++ = 'g'; \ - *outp++ = 't'; \ - *outp++ = ';'; \ - break; \ - default: \ - ncopy++; \ - } \ - inp++; \ - } \ - memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \ - } - -static PyObject* -escape_unicode_kind1(PyUnicodeObject *in) -{ - Py_UCS1 *inp = PyUnicode_1BYTE_DATA(in); - Py_UCS1 *inp_end = inp + PyUnicode_GET_LENGTH(in); - Py_UCS1 *outp; - PyObject *out; - Py_ssize_t delta = 0; - - GET_DELTA(inp, inp_end, delta); - if (!delta) { - Py_INCREF(in); - return (PyObject*)in; - } - - out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, - PyUnicode_IS_ASCII(in) ? 
127 : 255); - if (!out) - return NULL; - - inp = PyUnicode_1BYTE_DATA(in); - outp = PyUnicode_1BYTE_DATA(out); - DO_ESCAPE(inp, inp_end, outp); - return out; -} - -static PyObject* -escape_unicode_kind2(PyUnicodeObject *in) -{ - Py_UCS2 *inp = PyUnicode_2BYTE_DATA(in); - Py_UCS2 *inp_end = inp + PyUnicode_GET_LENGTH(in); - Py_UCS2 *outp; - PyObject *out; - Py_ssize_t delta = 0; - - GET_DELTA(inp, inp_end, delta); - if (!delta) { - Py_INCREF(in); - return (PyObject*)in; - } - - out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 65535); - if (!out) - return NULL; - - inp = PyUnicode_2BYTE_DATA(in); - outp = PyUnicode_2BYTE_DATA(out); - DO_ESCAPE(inp, inp_end, outp); - return out; -} - - -static PyObject* -escape_unicode_kind4(PyUnicodeObject *in) -{ - Py_UCS4 *inp = PyUnicode_4BYTE_DATA(in); - Py_UCS4 *inp_end = inp + PyUnicode_GET_LENGTH(in); - Py_UCS4 *outp; - PyObject *out; - Py_ssize_t delta = 0; - - GET_DELTA(inp, inp_end, delta); - if (!delta) { - Py_INCREF(in); - return (PyObject*)in; - } - - out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 1114111); - if (!out) - return NULL; - - inp = PyUnicode_4BYTE_DATA(in); - outp = PyUnicode_4BYTE_DATA(out); - DO_ESCAPE(inp, inp_end, outp); - return out; -} - -static PyObject* -escape_unicode(PyUnicodeObject *in) -{ - if (PyUnicode_READY(in)) - return NULL; - - switch (PyUnicode_KIND(in)) { - case PyUnicode_1BYTE_KIND: - return escape_unicode_kind1(in); - case PyUnicode_2BYTE_KIND: - return escape_unicode_kind2(in); - case PyUnicode_4BYTE_KIND: - return escape_unicode_kind4(in); - } - assert(0); /* shouldn't happen */ - return NULL; -} - -static PyObject* -escape(PyObject *self, PyObject *text) -{ - static PyObject *id_html; - PyObject *s = NULL, *rv = NULL, *html; - - if (id_html == NULL) { - id_html = PyUnicode_InternFromString("__html__"); - if (id_html == NULL) { - return NULL; - } - } - - /* we don't have to escape integers, bools or floats */ - if (PyLong_CheckExact(text) || - PyFloat_CheckExact(text) || PyBool_Check(text) || - text == Py_None) - return PyObject_CallFunctionObjArgs(markup, text, NULL); - - /* if the object has an __html__ method that performs the escaping */ - html = PyObject_GetAttr(text ,id_html); - if (html) { - s = PyObject_CallObject(html, NULL); - Py_DECREF(html); - if (s == NULL) { - return NULL; - } - /* Convert to Markup object */ - rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL); - Py_DECREF(s); - return rv; - } - - /* otherwise make the object unicode if it isn't, then escape */ - PyErr_Clear(); - if (!PyUnicode_Check(text)) { - PyObject *unicode = PyObject_Str(text); - if (!unicode) - return NULL; - s = escape_unicode((PyUnicodeObject*)unicode); - Py_DECREF(unicode); - } - else - s = escape_unicode((PyUnicodeObject*)text); - - /* convert the unicode string into a markup object. */ - rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL); - Py_DECREF(s); - return rv; -} - - -static PyObject* -escape_silent(PyObject *self, PyObject *text) -{ - if (text != Py_None) - return escape(self, text); - return PyObject_CallFunctionObjArgs(markup, NULL); -} - - -static PyObject* -soft_str(PyObject *self, PyObject *s) -{ - if (!PyUnicode_Check(s)) - return PyObject_Str(s); - Py_INCREF(s); - return s; -} - - -static PyObject* -soft_unicode(PyObject *self, PyObject *s) -{ - PyErr_WarnEx( - PyExc_DeprecationWarning, - "'soft_unicode' has been renamed to 'soft_str'. 
The old name" - " will be removed in MarkupSafe 2.1.", - 2 - ); - return soft_str(self, s); -} - - -static PyMethodDef module_methods[] = { - { - "escape", - (PyCFunction)escape, - METH_O, - "Replace the characters ``&``, ``<``, ``>``, ``'``, and ``\"`` in" - " the string with HTML-safe sequences. Use this if you need to display" - " text that might contain such characters in HTML.\n\n" - "If the object has an ``__html__`` method, it is called and the" - " return value is assumed to already be safe for HTML.\n\n" - ":param s: An object to be converted to a string and escaped.\n" - ":return: A :class:`Markup` string with the escaped text.\n" - }, - { - "escape_silent", - (PyCFunction)escape_silent, - METH_O, - "Like :func:`escape` but treats ``None`` as the empty string." - " Useful with optional values, as otherwise you get the string" - " ``'None'`` when the value is ``None``.\n\n" - ">>> escape(None)\n" - "Markup('None')\n" - ">>> escape_silent(None)\n" - "Markup('')\n" - }, - { - "soft_str", - (PyCFunction)soft_str, - METH_O, - "Convert an object to a string if it isn't already. This preserves" - " a :class:`Markup` string rather than converting it back to a basic" - " string, so it will still be marked as safe and won't be escaped" - " again.\n\n" - ">>> value = escape(\"\")\n" - ">>> value\n" - "Markup('<User 1>')\n" - ">>> escape(str(value))\n" - "Markup('&lt;User 1&gt;')\n" - ">>> escape(soft_str(value))\n" - "Markup('<User 1>')\n" - }, - { - "soft_unicode", - (PyCFunction)soft_unicode, - METH_O, - "" - }, - {NULL, NULL, 0, NULL} /* Sentinel */ -}; - -static struct PyModuleDef module_definition = { - PyModuleDef_HEAD_INIT, - "markupsafe._speedups", - NULL, - -1, - module_methods, - NULL, - NULL, - NULL, - NULL -}; - -PyMODINIT_FUNC -PyInit__speedups(void) -{ - if (!init_constants()) - return NULL; - - return PyModule_Create(&module_definition); -} From 278e7d1878353ab7775caaafab4c836d71900044 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 6 Aug 2022 23:47:33 -0400 Subject: [PATCH 022/200] Remove other spots that did not use the internal pip version to exectue pipenv commands. 
--- pipenv/cli/command.py | 2 +- pipenv/core.py | 85 +++++++++++++++------------------------- pipenv/project.py | 2 +- pipenv/utils/shell.py | 37 +++++++++++++++++ tests/unit/test_utils.py | 2 +- 5 files changed, 72 insertions(+), 56 deletions(-) diff --git a/pipenv/cli/command.py b/pipenv/cli/command.py index 069037590b..937be3d9d0 100644 --- a/pipenv/cli/command.py +++ b/pipenv/cli/command.py @@ -79,6 +79,7 @@ def cli( site_packages=None, **kwargs, ): + from pipenv.utils.shell import system_which from pipenv.utils.spinner import create_spinner from ..core import ( @@ -88,7 +89,6 @@ def cli( do_where, ensure_project, format_help, - system_which, warn_in_virtualenv, ) diff --git a/pipenv/core.py b/pipenv/core.py index a9c05edc24..77c8f93dac 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -17,6 +17,7 @@ from pipenv import environments, exceptions, pep508checker, progress from pipenv._compat import decode_for_output, fix_utf8 +from pipenv.patched.pip._internal.build_env import _get_runnable_pip from pipenv.patched.pip._internal.exceptions import PipError from pipenv.patched.pip._internal.network.session import PipSession from pipenv.patched.pip._internal.req.constructors import ( @@ -40,7 +41,9 @@ cmd_list_to_shell, find_python, is_python_command, + project_python, subprocess_run, + system_which, ) from pipenv.utils.spinner import create_spinner from pipenv.vendor import click @@ -1037,7 +1040,8 @@ def get_downloads_info(project, names_map, section): version = parse_download_fname(fname, name) # Get the hash of each file. cmd = [ - which_pip(project), + project_python(project), + _get_runnable_pip(), "hash", os.sep.join([project.download_location, fname]), ] @@ -1200,7 +1204,8 @@ def do_purge(project, bare=False, downloads=False, allow_global=False): click.echo(fix_utf8(f"Found {len(to_remove)} installed package(s), purging...")) command = [ - which_pip(project, allow_global=allow_global), + project_python(project), + _get_runnable_pip(), "uninstall", "-y", ] + list(to_remove) @@ -1516,8 +1521,7 @@ def pip_install( pip_command = [ project._which("python", allow_global=allow_global), - "-m", - "pip", + _get_runnable_pip(), "install", ] pip_args = get_pip_args( @@ -1568,7 +1572,8 @@ def pip_download(project, package_name): } for source in project.sources: cmd = [ - which_pip(project), + project_python(project), + _get_runnable_pip(), "download", package_name, "-i", @@ -1618,52 +1623,26 @@ def fallback_which(command, location=None, allow_global=False, system=False): return "" -def which_pip(project, allow_global=False): - """Returns the location of virtualenv-installed pip.""" +def which_pip(project): + """Prefers to utilize the vendor'd version of pip, falls back to the location of virtualenv-installed pip.""" location = None if "VIRTUAL_ENV" in os.environ: location = os.environ["VIRTUAL_ENV"] - if allow_global: - if location: - pip = project._which("pip", location=location) - if pip: - return pip + pip = project._which("python", location=location) + if pip: + return pip + if not pip: for p in ("pip", "pip3", "pip2"): where = system_which(p) if where: return where - pip = project._which("pip") - if not pip: - pip = fallback_which("pip", allow_global=allow_global, location=location) + pip = fallback_which("pip", allow_global=True, location=location) return pip -def system_which(command, path=None): - """Emulates the system's which. 
Returns None if not found.""" - import shutil - - result = shutil.which(command, path=path) - if result is None: - _which = "where" if os.name == "nt" else "which -a" - env = {"PATH": path} if path else None - c = subprocess_run(f"{_which} {command}", shell=True, env=env) - if c.returncode == 127: - click.echo( - "{}: the {} system utility is required for Pipenv to find Python installations properly." - "\n Please install it.".format( - click.style("Warning", fg="red", bold=True), - click.style(_which, fg="yellow"), - ), - err=True, - ) - if c.returncode == 0: - result = next(iter(c.stdout.splitlines()), None) - return result - - def format_help(help): """Formats the help string.""" help = help.replace("Options:", str(click.style("Options:", bold=True))) @@ -2354,7 +2333,6 @@ def do_uninstall( for normalized, p in selected_pkg_map.items() if normalized in (used_packages - bad_pkgs) ] - pip_path = None for normalized, package_name in selected_pkg_map.items(): click.secho( fix_utf8(f"Uninstalling {click.style(package_name)}..."), @@ -2364,9 +2342,13 @@ def do_uninstall( # Uninstall the package. if package_name in packages_to_remove: with project.environment.activated(): - if pip_path is None: - pip_path = which_pip(project, allow_global=system) - cmd = [pip_path, "uninstall", package_name, "-y"] + cmd = [ + project_python(project), + _get_runnable_pip(), + "uninstall", + package_name, + "-y", + ] c = run_command(cmd, is_verbose=project.s.is_verbose()) click.secho(c.stdout, fg="cyan") if c.returncode != 0: @@ -2672,15 +2654,7 @@ def do_check( safety_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "patched", "safety" ) - if not system: - python = project._which("python") - else: - interpreters = [system_which(p) for p in ("python", "python3", "python2")] - python = interpreters[0] if interpreters else None - if not python: - click.secho("The Python interpreter can't be found.", fg="red", err=True) - sys.exit(1) - _cmd = [Path(python).as_posix()] + _cmd = [project_python(project)] # Run the PEP 508 checker in the virtualenv. cmd = _cmd + [Path(pep508checker_path).as_posix()] c = run_command(cmd, is_verbose=project.s.is_verbose()) @@ -3031,7 +3005,6 @@ def do_clean( if used_package in installed_package_names: installed_package_names.remove(used_package) failure = False - cmd = [which_pip(project, allow_global=system), "uninstall", "-y", "-qq"] for apparent_bad_package in installed_package_names: if dry_run and not bare: click.echo(apparent_bad_package) @@ -3043,7 +3016,13 @@ def do_clean( bold=True, ) # Uninstall the package. 
- cmd = [which_pip(project), "uninstall", apparent_bad_package, "-y"] + cmd = [ + project_python(project), + _get_runnable_pip(), + "uninstall", + apparent_bad_package, + "-y", + ] c = run_command(cmd, is_verbose=project.s.is_verbose()) if c.returncode != 0: failure = True diff --git a/pipenv/project.py b/pipenv/project.py index 81d0d33fe2..b219907fb4 100644 --- a/pipenv/project.py +++ b/pipenv/project.py @@ -20,7 +20,6 @@ import vistir from pipenv.cmdparse import Script -from pipenv.core import system_which from pipenv.environment import Environment from pipenv.environments import Setting, is_in_virtualenv, normalize_pipfile_path from pipenv.patched.pip._internal.commands.install import InstallCommand @@ -41,6 +40,7 @@ is_virtual_environment, looks_like_dir, safe_expandvars, + system_which, ) from pipenv.utils.toml import cleanup_toml, convert_toml_outline_tables from pipenv.vendor.cached_property import cached_property diff --git a/pipenv/utils/shell.py b/pipenv/utils/shell.py index 9af584b008..9e8c7ef702 100644 --- a/pipenv/utils/shell.py +++ b/pipenv/utils/shell.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import errno import os import posixpath @@ -449,3 +451,38 @@ def env_to_bool(val): if val.lower() in TRUE_VALUES: return True raise ValueError(f"Value is not a valid boolean-like: {val}") + + +def project_python(project, system=False): + if not system: + python = project._which("python") + else: + interpreters = [system_which(p) for p in ("python", "python3")] + python = interpreters[0] if interpreters else None + if not python: + click.secho("The Python interpreter can't be found.", fg="red", err=True) + sys.exit(1) + return Path(python).as_posix() + + +def system_which(command, path=None): + """Emulates the system's which. Returns None if not found.""" + import shutil + + result = shutil.which(command, path=path) + if result is None: + _which = "where" if os.name == "nt" else "which -a" + env = {"PATH": path} if path else None + c = subprocess_run(f"{_which} {command}", shell=True, env=env) + if c.returncode == 127: + click.echo( + "{}: the {} system utility is required for Pipenv to find Python installations properly." + "\n Please install it.".format( + click.style("Warning", fg="red", bold=True), + click.style(_which, fg="yellow"), + ), + err=True, + ) + if c.returncode == 0: + result = next(iter(c.stdout.splitlines()), None) + return result diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 04df666357..0ed4a35354 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -84,7 +84,7 @@ def mock_unpack(link, source_dir, download_dir, only_download=False, session=Non @pytest.mark.utils @pytest.mark.parametrize("deps, expected", DEP_PIP_PAIRS) @pytest.mark.needs_internet -@mock.patch("pipenv.patched.pip._internal.operations.prepare.unpack_url", mock_unpack) +# @mock.patch("pipenv.patched.pip._internal.operations.prepare.unpack_url", mock_unpack) def test_convert_deps_to_pip(deps, expected): if expected.startswith("Django"): expected = expected.lower() From 905951fc71fa1f94b3e475eac878eb3819b58b8d Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 6 Aug 2022 23:53:24 -0400 Subject: [PATCH 023/200] Add news fragment. 
--- news/5200.removal.rst | 2 +- news/5229.bugfix.rst | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 news/5229.bugfix.rst diff --git a/news/5200.removal.rst b/news/5200.removal.rst index 13c8857553..848ef1c05c 100644 --- a/news/5200.removal.rst +++ b/news/5200.removal.rst @@ -1 +1 @@ -The deprecated way of generating requriements ``install -r`` or ``lock -r`` has been removed in favor of the ``pipenv requirements`` command. +The deprecated way of generating requirements ``install -r`` or ``lock -r`` has been removed in favor of the ``pipenv requirements`` command. diff --git a/news/5229.bugfix.rst b/news/5229.bugfix.rst new file mode 100644 index 0000000000..bb8bcdaae7 --- /dev/null +++ b/news/5229.bugfix.rst @@ -0,0 +1 @@ +Address remaining ``pipenv`` commands that were still referencing the user or system installed ``pip`` to use the vendored ``pip`` internal to ``pipenv``. From 68d1781b0fd8b0b8a3efd50329d8caf5f3daf38d Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 6 Aug 2022 23:56:34 -0400 Subject: [PATCH 024/200] Remove commented out line. --- tests/unit/test_utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 0ed4a35354..3aa22dbbb7 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -84,7 +84,6 @@ def mock_unpack(link, source_dir, download_dir, only_download=False, session=Non @pytest.mark.utils @pytest.mark.parametrize("deps, expected", DEP_PIP_PAIRS) @pytest.mark.needs_internet -# @mock.patch("pipenv.patched.pip._internal.operations.prepare.unpack_url", mock_unpack) def test_convert_deps_to_pip(deps, expected): if expected.startswith("Django"): expected = expected.lower() From ca0150eae689b6da055f609c9ed6e8191972d26a Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sun, 7 Aug 2022 00:49:25 -0400 Subject: [PATCH 025/200] Updated Pipfile.lock. 
--- Pipfile.lock | 120 ++++++++++++++++++++++++--------------------------- 1 file changed, 56 insertions(+), 64 deletions(-) diff --git a/Pipfile.lock b/Pipfile.lock index 3cfcaeee35..98a61e32bf 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -139,7 +139,7 @@ "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da", "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4" ], - "markers": "platform_system == 'Windows'", + "markers": "sys_platform == 'win32'", "version": "==0.4.5" }, "coverage": { @@ -147,50 +147,50 @@ "toml" ], "hashes": [ - "sha256:0895ea6e6f7f9939166cc835df8fa4599e2d9b759b02d1521b574e13b859ac32", - "sha256:0f211df2cba951ffcae210ee00e54921ab42e2b64e0bf2c0befc977377fb09b7", - "sha256:147605e1702d996279bb3cc3b164f408698850011210d133a2cb96a73a2f7996", - "sha256:24b04d305ea172ccb21bee5bacd559383cba2c6fcdef85b7701cf2de4188aa55", - "sha256:25b7ec944f114f70803d6529394b64f8749e93cbfac0fe6c5ea1b7e6c14e8a46", - "sha256:2b20286c2b726f94e766e86a3fddb7b7e37af5d0c635bdfa7e4399bc523563de", - "sha256:2dff52b3e7f76ada36f82124703f4953186d9029d00d6287f17c68a75e2e6039", - "sha256:2f8553878a24b00d5ab04b7a92a2af50409247ca5c4b7a2bf4eabe94ed20d3ee", - "sha256:3def6791adf580d66f025223078dc84c64696a26f174131059ce8e91452584e1", - "sha256:422fa44070b42fef9fb8dabd5af03861708cdd6deb69463adc2130b7bf81332f", - "sha256:4f89d8e03c8a3757aae65570d14033e8edf192ee9298303db15955cadcff0c63", - "sha256:5336e0352c0b12c7e72727d50ff02557005f79a0b8dcad9219c7c4940a930083", - "sha256:54d8d0e073a7f238f0666d3c7c0d37469b2aa43311e4024c925ee14f5d5a1cbe", - "sha256:5ef42e1db047ca42827a85e34abe973971c635f83aed49611b7f3ab49d0130f0", - "sha256:5f65e5d3ff2d895dab76b1faca4586b970a99b5d4b24e9aafffc0ce94a6022d6", - "sha256:6c3ccfe89c36f3e5b9837b9ee507472310164f352c9fe332120b764c9d60adbe", - "sha256:6d0b48aff8e9720bdec315d67723f0babd936a7211dc5df453ddf76f89c59933", - "sha256:6fe75dcfcb889b6800f072f2af5a331342d63d0c1b3d2bf0f7b4f6c353e8c9c0", - "sha256:79419370d6a637cb18553ecb25228893966bd7935a9120fa454e7076f13b627c", - "sha256:7bb00521ab4f99fdce2d5c05a91bddc0280f0afaee0e0a00425e28e209d4af07", - "sha256:80db4a47a199c4563d4a25919ff29c97c87569130375beca3483b41ad5f698e8", - "sha256:866ebf42b4c5dbafd64455b0a1cd5aa7b4837a894809413b930026c91e18090b", - "sha256:8af6c26ba8df6338e57bedbf916d76bdae6308e57fc8f14397f03b5da8622b4e", - "sha256:a13772c19619118903d65a91f1d5fea84be494d12fd406d06c849b00d31bf120", - "sha256:a697977157adc052284a7160569b36a8bbec09db3c3220642e6323b47cec090f", - "sha256:a9032f9b7d38bdf882ac9f66ebde3afb8145f0d4c24b2e600bc4c6304aafb87e", - "sha256:b5e28db9199dd3833cc8a07fa6cf429a01227b5d429facb56eccd765050c26cd", - "sha256:c77943ef768276b61c96a3eb854eba55633c7a3fddf0a79f82805f232326d33f", - "sha256:d230d333b0be8042ac34808ad722eabba30036232e7a6fb3e317c49f61c93386", - "sha256:d4548be38a1c810d79e097a38107b6bf2ff42151900e47d49635be69943763d8", - "sha256:d4e7ced84a11c10160c0697a6cc0b214a5d7ab21dfec1cd46e89fbf77cc66fae", - "sha256:d56f105592188ce7a797b2bd94b4a8cb2e36d5d9b0d8a1d2060ff2a71e6b9bbc", - "sha256:d714af0bdba67739598849c9f18efdcc5a0412f4993914a0ec5ce0f1e864d783", - "sha256:d774d9e97007b018a651eadc1b3970ed20237395527e22cbeb743d8e73e0563d", - "sha256:e0524adb49c716ca763dbc1d27bedce36b14f33e6b8af6dba56886476b42957c", - "sha256:e2618cb2cf5a7cc8d698306e42ebcacd02fb7ef8cfc18485c59394152c70be97", - "sha256:e36750fbbc422c1c46c9d13b937ab437138b998fe74a635ec88989afb57a3978", - "sha256:edfdabe7aa4f97ed2b9dd5dde52d2bb29cb466993bb9d612ddd10d0085a683cf", - 
"sha256:f22325010d8824594820d6ce84fa830838f581a7fd86a9235f0d2ed6deb61e29", - "sha256:f23876b018dfa5d3e98e96f5644b109090f16a4acb22064e0f06933663005d39", - "sha256:f7bd0ffbcd03dc39490a1f40b2669cc414fae0c4e16b77bb26806a4d0b7d1452" + "sha256:04010af3c06ce2bfeb3b1e4e05d136f88d88c25f76cd4faff5d1fd84d11581ea", + "sha256:05de0762c1caed4a162b3e305f36cf20a548ff4da0be6766ad5c870704be3660", + "sha256:068d6f2a893af838291b8809c876973d885543411ea460f3e6886ac0ee941732", + "sha256:0a84376e4fd13cebce2c0ef8c2f037929c8307fb94af1e5dbe50272a1c651b5d", + "sha256:0e34247274bde982bbc613894d33f9e36358179db2ed231dd101c48dd298e7b0", + "sha256:0e3a41aad5919613483aad9ebd53336905cab1bd6788afd3995c2a972d89d795", + "sha256:306788fd019bb90e9cbb83d3f3c6becad1c048dd432af24f8320cf38ac085684", + "sha256:39ebd8e120cb77a06ee3d5fc26f9732670d1c397d7cd3acf02f6f62693b89b80", + "sha256:411fdd9f4203afd93b056c0868c8f9e5e16813e765de962f27e4e5798356a052", + "sha256:4822327b35cb032ff16af3bec27f73985448f08e874146b5b101e0e558b613dd", + "sha256:52f8b9fcf3c5e427d51bbab1fb92b575a9a9235d516f175b24712bcd4b5be917", + "sha256:53c8edd3b83a4ddba3d8c506f1359401e7770b30f2188f15c17a338adf5a14db", + "sha256:555a498999c44f5287cc95500486cd0d4f021af9162982cbe504d4cb388f73b5", + "sha256:59fc88bc13e30f25167e807b8cad3c41b7218ef4473a20c86fd98a7968733083", + "sha256:5a559aab40c716de80c7212295d0dc96bc1b6c719371c20dd18c5187c3155518", + "sha256:5de1e9335e2569974e20df0ce31493d315a830d7987e71a24a2a335a8d8459d3", + "sha256:6630d8d943644ea62132789940ca97d05fac83f73186eaf0930ffa715fbdab6b", + "sha256:73a10939dc345460ca0655356a470dd3de9759919186a82383c87b6eb315faf2", + "sha256:7856ea39059d75f822ff0df3a51ea6d76307c897048bdec3aad1377e4e9dca20", + "sha256:877ee5478fd78e100362aed56db47ccc5f23f6e7bb035a8896855f4c3e49bc9b", + "sha256:920a734fe3d311ca01883b4a19aa386c97b82b69fbc023458899cff0a0d621b9", + "sha256:923f9084d7e1d31b5f74c92396b05b18921ed01ee5350402b561a79dce3ea48d", + "sha256:a0d2df4227f645a879010461df2cea6b7e3fb5a97d7eafa210f7fb60345af9e8", + "sha256:a2738ba1ee544d6f294278cfb6de2dc1f9a737a780469b5366e662a218f806c3", + "sha256:a42eaaae772f14a5194f181740a67bfd48e8806394b8c67aa4399e09d0d6b5db", + "sha256:ab2b1a89d2bc7647622e9eaf06128a5b5451dccf7c242deaa31420b055716481", + "sha256:ab9ef0187d6c62b09dec83a84a3b94f71f9690784c84fd762fb3cf2d2b44c914", + "sha256:adf1a0d272633b21d645dd6e02e3293429c1141c7d65a58e4cbcd592d53b8e01", + "sha256:b104b6b1827d6a22483c469e3983a204bcf9c6bf7544bf90362c4654ebc2edf3", + "sha256:bc698580216050b5f4a34d2cdd2838b429c53314f1c4835fab7338200a8396f2", + "sha256:cdf7b83f04a313a21afb1f8730fe4dd09577fefc53bbdfececf78b2006f4268e", + "sha256:d5191d53afbe5b6059895fa7f58223d3751c42b8101fb3ce767e1a0b1a1d8f87", + "sha256:d75314b00825d70e1e34b07396e23f47ed1d4feedc0122748f9f6bd31a544840", + "sha256:e4d64304acf79766e650f7acb81d263a3ea6e2d0d04c5172b7189180ff2c023c", + "sha256:ec2ae1f398e5aca655b7084392d23e80efb31f7a660d2eecf569fb9f79b3fb94", + "sha256:eff095a5aac7011fdb51a2c82a8fae9ec5211577f4b764e1e59cfa27ceeb1b59", + "sha256:f1eda5cae434282712e40b42aaf590b773382afc3642786ac3ed39053973f61f", + "sha256:f217850ac0e046ede611312703423767ca032a7b952b5257efac963942c055de", + "sha256:f50d3a822947572496ea922ee7825becd8e3ae6fbd2400cd8236b7d64b17f285", + "sha256:fc294de50941d3da66a09dca06e206297709332050973eca17040278cb0918ff", + "sha256:ff9832434a9193fbd716fbe05f9276484e18d26cc4cf850853594bb322807ac3" ], "markers": "python_version >= '3.7'", - "version": "==6.4.2" + "version": "==6.4.3" }, "distlib": { "hashes": [ @@ -241,19 +241,19 @@ }, "flask": { "hashes": [ - 
"sha256:10dc2bae7a9b6ab59111d6dbece2e08fb0015d2e88d296c40323cc0c7aac2c2e", - "sha256:98b33b13ad76ee9c7a80d2f56a6c578780e55bf8281790c62d50d4b7fadec2b8" + "sha256:3c604c48c3d5b4c63e72134044c0b4fe90ff01ef65280b9fe2d38c8860d99fe5", + "sha256:9c2b81b9b1edcc835af72d600f1955e713a065e7cb41d7e51ee762b449d9c65d" ], "markers": "python_version >= '3.7'", - "version": "==2.2.0" + "version": "==2.2.1" }, "identify": { "hashes": [ - "sha256:a3d4c096b384d50d5e6dc5bc8b9bc44f1f61cefebd750a7b3e9f939b53fb214d", - "sha256:feaa9db2dc0ce333b453ce171c0cf1247bbfde2c55fc6bb785022d411a1b78b5" + "sha256:25851c8c1370effb22aaa3c987b30449e9ff0cece408f810ae6ce408fdd20893", + "sha256:887e7b91a1be152b0d46bbf072130235a8117392b9f1828446079a816a05ef44" ], "markers": "python_version >= '3.7'", - "version": "==2.5.2" + "version": "==2.5.3" }, "idna": { "hashes": [ @@ -415,14 +415,6 @@ ], "version": "==0.9.0" }, - "pip": { - "hashes": [ - "sha256:0bbbc87dfbe6eed217beff0021f8b7dea04c8f4a0baa9d31dc4cff281ffc5b2b", - "sha256:50516e47a2b79e77446f0d05649f0d53772c192571486236b1905492bfc24bac" - ], - "markers": "python_version >= '3.7'", - "version": "==22.2.1" - }, "pipenv": { "editable": true, "extras": [ @@ -605,11 +597,11 @@ }, "setuptools": { "hashes": [ - "sha256:273b6847ae61f7829c1affcdd9a32f67aa65233be508f4fbaab866c5faa4e408", - "sha256:d5340d16943a0f67057329db59b564e938bb3736c6e50ae16ea84d5e5d9ba6d0" + "sha256:73bfae4791da7c1c56882ab17577d00f7a37a0347162aeb9360058de0dc25083", + "sha256:abcb76aa4decd7a17cbe0e4b31bdf549d106ba7f668e87e0860f5f7b84b9b3fe" ], "markers": "python_version >= '3.7'", - "version": "==63.3.0" + "version": "==63.4.2" }, "six": { "hashes": [ @@ -726,7 +718,7 @@ "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc", "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f" ], - "markers": "python_full_version < '3.11.0a7'", + "markers": "python_version >= '3.6'", "version": "==2.0.1" }, "towncrier": { @@ -754,11 +746,11 @@ }, "virtualenv": { "hashes": [ - "sha256:0ef5be6d07181946891f5abc8047fda8bc2f0b4b9bf222c64e6e8963baee76db", - "sha256:635b272a8e2f77cb051946f46c60a54ace3cb5e25568228bd6b57fc70eca9ff3" + "sha256:4193b7bc8a6cd23e4eb251ac64f29b4398ab2c233531e66e40b19a6b7b0d30c1", + "sha256:d86ea0bb50e06252d79e6c241507cb904fcd66090c3271381372d6221a3970f9" ], "markers": "python_version >= '3.6'", - "version": "==20.16.2" + "version": "==20.16.3" }, "virtualenv-clone": { "hashes": [ From 66c75bb0f6d18a4affccdfa0698410e5982bd793 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sun, 7 Aug 2022 01:03:41 -0400 Subject: [PATCH 026/200] This is possibly no longer required and could be breaking the build. --- .github/workflows/ci.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 48fcfacae0..91578dac68 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -83,9 +83,6 @@ jobs: id: python-path run: | echo ::set-output name=path::$(python -c "import sys; print(sys.executable)") - - name: Install latest pip, setuptools, wheel - run: | - python -m pip install --upgrade pip setuptools wheel --upgrade-strategy=eager - name: Install dependencies env: PIPENV_DEFAULT_PYTHON_VERSION: ${{ matrix.python-version }} From 90af9efbcd9d34cfe21429eed0d2476b6f251a89 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sun, 7 Aug 2022 01:12:38 -0400 Subject: [PATCH 027/200] This is possibly no longer required and could be breaking the build. 
--- setup.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 0a82b7a2b7..432ba3ffe9 100644 --- a/setup.py +++ b/setup.py @@ -2,9 +2,8 @@ import codecs import os import sys -from shutil import rmtree -from setuptools import Command, find_packages, setup +from setuptools import find_packages, setup here = os.path.abspath(os.path.dirname(__file__)) @@ -22,7+21,6 @@ required = [ "certifi", - "setuptools>=36.2.1", "virtualenv-clone>=0.2.5", "virtualenv", ] From bbb039067e63949bc34d3e681d1ccba482a34894 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sun, 7 Aug 2022 01:16:35 -0400 Subject: [PATCH 028/200] Not sure why setuptools is getting added to the lockfile, but it's problematic. --- Pipfile.lock | 8 -------- 1 file changed, 8 deletions(-) diff --git a/Pipfile.lock b/Pipfile.lock index 98a61e32bf..676301df01 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -595,14 +595,6 @@ "markers": "python_version >= '3.7' and python_version < '4'", "version": "==2.28.1" }, - "setuptools": { - "hashes": [ - "sha256:73bfae4791da7c1c56882ab17577d00f7a37a0347162aeb9360058de0dc25083", - "sha256:abcb76aa4decd7a17cbe0e4b31bdf549d106ba7f668e87e0860f5f7b84b9b3fe" - ], - "markers": "python_version >= '3.7'", - "version": "==63.4.2" - }, "six": { "hashes": [ "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", From 825da6716dcee893b490347f22af1fe5c48b662c Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sun, 7 Aug 2022 10:08:37 -0400 Subject: [PATCH 029/200] Add back these setuptools steps. --- .github/workflows/ci.yaml | 3 +++ setup.py | 1 + 2 files changed, 4 insertions(+) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 91578dac68..48fcfacae0 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -83,6 +83,9 @@ jobs: id: python-path run: | echo ::set-output name=path::$(python -c "import sys; print(sys.executable)") + - name: Install latest pip, setuptools, wheel + run: | + python -m pip install --upgrade pip setuptools wheel --upgrade-strategy=eager - name: Install dependencies env: PIPENV_DEFAULT_PYTHON_VERSION: ${{ matrix.python-version }} diff --git a/setup.py b/setup.py index 432ba3ffe9..5567962b37 100644 --- a/setup.py +++ b/setup.py @@ -21,6 +21,7 @@ required = [ "certifi", + "setuptools>=36.2.1", "virtualenv-clone>=0.2.5", "virtualenv", ] From d98602afe28b5ee359fc290ff99e8987ac080d8e Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sun, 7 Aug 2022 11:00:19 -0400 Subject: [PATCH 030/200] Try with a version that has the new setuptools available in PyPI. --- pipenv/utils/shell.py | 2 -- tests/pypi | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/pipenv/utils/shell.py b/pipenv/utils/shell.py index 9e8c7ef702..e77be0ab1b 100644 --- a/pipenv/utils/shell.py +++ b/pipenv/utils/shell.py @@ -1,5 +1,3 @@ -from __future__ import annotations - import errno import os import posixpath diff --git a/tests/pypi b/tests/pypi index f553001342..d374a3bd90 160000 --- a/tests/pypi +++ b/tests/pypi @@ -1 +1 @@ -Subproject commit f5530013426d6392d67cd1703f379d20a768c1cf +Subproject commit d374a3bd90485d30027952236b676cb1fdfaba9a From 2a9868875f9f4dd83ee3486be42b4ec79a6890ef Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sun, 7 Aug 2022 18:22:25 -0400 Subject: [PATCH 031/200] Fix inclusion of setuptools and other BAD_PACKAGES in the lock and install phases.
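For context, the change below extends batch_install's filter so that anything in BAD_PACKAGES is also skipped at install time. A minimal sketch of the resulting filter, with illustrative names (the real BAD_PACKAGES constant and Environment.is_satisfied live in pipenv itself; the set contents shown here are an assumption, not the authoritative list):

    # Illustrative subset of pipenv's BAD_PACKAGES constant.
    BAD_PACKAGES = {"distribute", "pip", "pkg-resources", "setuptools", "wheel"}

    def deps_needing_install(deps, environment):
        # Keep a dependency only if the environment does not already satisfy
        # it and it is not a package pipenv deliberately never manages.
        return [
            dep
            for dep in deps
            if not environment.is_satisfied(dep) and dep.name not in BAD_PACKAGES
        ]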
--- pipenv/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipenv/core.py b/pipenv/core.py index 77c8f93dac..be5aba7904 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -730,7 +730,7 @@ def batch_install( deps_to_install = deps_list[:] deps_to_install.extend(sequential_deps) deps_to_install = [ - dep for dep in deps_to_install if not project.environment.is_satisfied(dep) + dep for dep in deps_to_install if not project.environment.is_satisfied(dep) and dep.name not in BAD_PACKAGES ] sequential_dep_names = [d.name for d in sequential_deps] From 8d41a28b4edf6d4bfb587ba622106e6b71136a86 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sun, 7 Aug 2022 18:59:56 -0400 Subject: [PATCH 032/200] Fix linting --- pipenv/core.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pipenv/core.py b/pipenv/core.py index be5aba7904..113ef9e5be 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -730,7 +730,9 @@ def batch_install( deps_to_install = deps_list[:] deps_to_install.extend(sequential_deps) deps_to_install = [ - dep for dep in deps_to_install if not project.environment.is_satisfied(dep) and dep.name not in BAD_PACKAGES + dep + for dep in deps_to_install + if not project.environment.is_satisfied(dep) and dep.name not in BAD_PACKAGES ] sequential_dep_names = [d.name for d in sequential_deps] From 4e32e72001f929cf1f93636d0d7f6a8a245e2cb8 Mon Sep 17 00:00:00 2001 From: Oz N Tiram Date: Sat, 6 Aug 2022 23:07:58 +0200 Subject: [PATCH 033/200] Remove unused function - write_backport_imports --- tasks/vendoring/__init__.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/tasks/vendoring/__init__.py b/tasks/vendoring/__init__.py index 612b9f8735..1eda7ea147 100644 --- a/tasks/vendoring/__init__.py +++ b/tasks/vendoring/__init__.py @@ -224,21 +224,6 @@ def rename_if_needed(ctx, vendor_dir, item): child.rename(str(new_path / child.name)) -def write_backport_imports(ctx, vendor_dir): - backport_dir = vendor_dir / "backports" - if not backport_dir.exists(): - return - backport_init = backport_dir / "__init__.py" - backport_libs = detect_vendored_libs(backport_dir) - init_py_lines = backport_init.read_text().splitlines() - for lib in backport_libs: - lib_line = f"from . 
import {lib}" - if lib_line not in init_py_lines: - log("Adding backport %s to __init__.py exports" % lib) - init_py_lines.append(lib_line) - backport_init.write_text("\n".join(init_py_lines) + "\n") - - def _ensure_package_in_requirements(ctx, requirements_file, package): requirement = None log("using requirements file: %s" % requirements_file) @@ -363,7 +348,6 @@ def vendor(ctx, vendor_dir, package=None, rewrite=True): elif item.name not in FILE_WHITE_LIST: if rewrite and not package or (package and item.stem.lower() in package): rewrite_file_imports(item, vendored_libs) - write_backport_imports(ctx, vendor_dir) if not package: apply_patches(ctx, patched=is_patched, pre=False) if is_patched: From 70fe10af70b69547ffefcfbc15e8308d1b568c28 Mon Sep 17 00:00:00 2001 From: Oz N Tiram Date: Sat, 6 Aug 2022 23:34:51 +0200 Subject: [PATCH 034/200] Remove vendored pep517, use it from pip._vendor --- pipenv/vendor/pep517/LICENSE | 21 - pipenv/vendor/pep517/__init__.py | 6 - pipenv/vendor/pep517/build.py | 127 ------ pipenv/vendor/pep517/check.py | 207 ---------- pipenv/vendor/pep517/colorlog.py | 115 ------ pipenv/vendor/pep517/compat.py | 42 -- pipenv/vendor/pep517/dirtools.py | 44 --- pipenv/vendor/pep517/envbuild.py | 171 -------- pipenv/vendor/pep517/in_process/__init__.py | 17 - .../vendor/pep517/in_process/_in_process.py | 349 ---------------- pipenv/vendor/pep517/meta.py | 92 ----- pipenv/vendor/pep517/wrappers.py | 371 ------------------ .../requirementslib/models/setup_info.py | 10 +- pipenv/vendor/vendor.txt | 1 - tasks/vendoring/__init__.py | 10 + 15 files changed, 15 insertions(+), 1568 deletions(-) delete mode 100644 pipenv/vendor/pep517/LICENSE delete mode 100644 pipenv/vendor/pep517/__init__.py delete mode 100644 pipenv/vendor/pep517/build.py delete mode 100644 pipenv/vendor/pep517/check.py delete mode 100644 pipenv/vendor/pep517/colorlog.py delete mode 100644 pipenv/vendor/pep517/compat.py delete mode 100644 pipenv/vendor/pep517/dirtools.py delete mode 100644 pipenv/vendor/pep517/envbuild.py delete mode 100644 pipenv/vendor/pep517/in_process/__init__.py delete mode 100644 pipenv/vendor/pep517/in_process/_in_process.py delete mode 100644 pipenv/vendor/pep517/meta.py delete mode 100644 pipenv/vendor/pep517/wrappers.py diff --git a/pipenv/vendor/pep517/LICENSE b/pipenv/vendor/pep517/LICENSE deleted file mode 100644 index b0ae9dbc26..0000000000 --- a/pipenv/vendor/pep517/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017 Thomas Kluyver - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/pipenv/vendor/pep517/__init__.py b/pipenv/vendor/pep517/__init__.py deleted file mode 100644 index f064d60c8b..0000000000 --- a/pipenv/vendor/pep517/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Wrappers to build Python packages using PEP 517 hooks -""" - -__version__ = '0.11.0' - -from .wrappers import * # noqa: F401, F403 diff --git a/pipenv/vendor/pep517/build.py b/pipenv/vendor/pep517/build.py deleted file mode 100644 index 3b75214532..0000000000 --- a/pipenv/vendor/pep517/build.py +++ /dev/null @@ -1,127 +0,0 @@ -"""Build a project using PEP 517 hooks. -""" -import argparse -import io -import logging -import os -import shutil - -from .envbuild import BuildEnvironment -from .wrappers import Pep517HookCaller -from .dirtools import tempdir, mkdir_p -from .compat import FileNotFoundError, toml_load - -log = logging.getLogger(__name__) - - -def validate_system(system): - """ - Ensure build system has the requisite fields. - """ - required = {'requires', 'build-backend'} - if not (required <= set(system)): - message = "Missing required fields: {missing}".format( - missing=required-set(system), - ) - raise ValueError(message) - - -def load_system(source_dir): - """ - Load the build system from a source dir (pyproject.toml). - """ - pyproject = os.path.join(source_dir, 'pyproject.toml') - with io.open(pyproject, encoding="utf-8") as f: - pyproject_data = toml_load(f) - return pyproject_data['build-system'] - - -def compat_system(source_dir): - """ - Given a source dir, attempt to get a build system backend - and requirements from pyproject.toml. Fallback to - setuptools but only if the file was not found or a build - system was not indicated. 
- """ - try: - system = load_system(source_dir) - except (FileNotFoundError, KeyError): - system = {} - system.setdefault( - 'build-backend', - 'setuptools.build_meta:__legacy__', - ) - system.setdefault('requires', ['setuptools', 'wheel']) - return system - - -def _do_build(hooks, env, dist, dest): - get_requires_name = 'get_requires_for_build_{dist}'.format(**locals()) - get_requires = getattr(hooks, get_requires_name) - reqs = get_requires({}) - log.info('Got build requires: %s', reqs) - - env.pip_install(reqs) - log.info('Installed dynamic build dependencies') - - with tempdir() as td: - log.info('Trying to build %s in %s', dist, td) - build_name = 'build_{dist}'.format(**locals()) - build = getattr(hooks, build_name) - filename = build(td, {}) - source = os.path.join(td, filename) - shutil.move(source, os.path.join(dest, os.path.basename(filename))) - - -def build(source_dir, dist, dest=None, system=None): - system = system or load_system(source_dir) - dest = os.path.join(source_dir, dest or 'dist') - mkdir_p(dest) - - validate_system(system) - hooks = Pep517HookCaller( - source_dir, system['build-backend'], system.get('backend-path') - ) - - with BuildEnvironment() as env: - env.pip_install(system['requires']) - _do_build(hooks, env, dist, dest) - - -parser = argparse.ArgumentParser() -parser.add_argument( - 'source_dir', - help="A directory containing pyproject.toml", -) -parser.add_argument( - '--binary', '-b', - action='store_true', - default=False, -) -parser.add_argument( - '--source', '-s', - action='store_true', - default=False, -) -parser.add_argument( - '--out-dir', '-o', - help="Destination in which to save the builds relative to source dir", -) - - -def main(args): - log.warning('pep517.build is deprecated. ' - 'Consider switching to https://pypi.org/project/build/') - - # determine which dists to build - dists = list(filter(None, ( - 'sdist' if args.source or not args.binary else None, - 'wheel' if args.binary or not args.source else None, - ))) - - for dist in dists: - build(args.source_dir, dist, args.out_dir) - - -if __name__ == '__main__': - main(parser.parse_args()) diff --git a/pipenv/vendor/pep517/check.py b/pipenv/vendor/pep517/check.py deleted file mode 100644 index 719be04033..0000000000 --- a/pipenv/vendor/pep517/check.py +++ /dev/null @@ -1,207 +0,0 @@ -"""Check a project and backend by attempting to build using PEP 517 hooks. 
-""" -import argparse -import io -import logging -import os -from os.path import isfile, join as pjoin -import shutil -from subprocess import CalledProcessError -import sys -import tarfile -from tempfile import mkdtemp -import zipfile - -from .colorlog import enable_colourful_output -from .compat import TOMLDecodeError, toml_load -from .envbuild import BuildEnvironment -from .wrappers import Pep517HookCaller - -log = logging.getLogger(__name__) - - -def check_build_sdist(hooks, build_sys_requires): - with BuildEnvironment() as env: - try: - env.pip_install(build_sys_requires) - log.info('Installed static build dependencies') - except CalledProcessError: - log.error('Failed to install static build dependencies') - return False - - try: - reqs = hooks.get_requires_for_build_sdist({}) - log.info('Got build requires: %s', reqs) - except Exception: - log.error('Failure in get_requires_for_build_sdist', exc_info=True) - return False - - try: - env.pip_install(reqs) - log.info('Installed dynamic build dependencies') - except CalledProcessError: - log.error('Failed to install dynamic build dependencies') - return False - - td = mkdtemp() - log.info('Trying to build sdist in %s', td) - try: - try: - filename = hooks.build_sdist(td, {}) - log.info('build_sdist returned %r', filename) - except Exception: - log.info('Failure in build_sdist', exc_info=True) - return False - - if not filename.endswith('.tar.gz'): - log.error( - "Filename %s doesn't have .tar.gz extension", filename) - return False - - path = pjoin(td, filename) - if isfile(path): - log.info("Output file %s exists", path) - else: - log.error("Output file %s does not exist", path) - return False - - if tarfile.is_tarfile(path): - log.info("Output file is a tar file") - else: - log.error("Output file is not a tar file") - return False - - finally: - shutil.rmtree(td) - - return True - - -def check_build_wheel(hooks, build_sys_requires): - with BuildEnvironment() as env: - try: - env.pip_install(build_sys_requires) - log.info('Installed static build dependencies') - except CalledProcessError: - log.error('Failed to install static build dependencies') - return False - - try: - reqs = hooks.get_requires_for_build_wheel({}) - log.info('Got build requires: %s', reqs) - except Exception: - log.error('Failure in get_requires_for_build_sdist', exc_info=True) - return False - - try: - env.pip_install(reqs) - log.info('Installed dynamic build dependencies') - except CalledProcessError: - log.error('Failed to install dynamic build dependencies') - return False - - td = mkdtemp() - log.info('Trying to build wheel in %s', td) - try: - try: - filename = hooks.build_wheel(td, {}) - log.info('build_wheel returned %r', filename) - except Exception: - log.info('Failure in build_wheel', exc_info=True) - return False - - if not filename.endswith('.whl'): - log.error("Filename %s doesn't have .whl extension", filename) - return False - - path = pjoin(td, filename) - if isfile(path): - log.info("Output file %s exists", path) - else: - log.error("Output file %s does not exist", path) - return False - - if zipfile.is_zipfile(path): - log.info("Output file is a zip file") - else: - log.error("Output file is not a zip file") - return False - - finally: - shutil.rmtree(td) - - return True - - -def check(source_dir): - pyproject = pjoin(source_dir, 'pyproject.toml') - if isfile(pyproject): - log.info('Found pyproject.toml') - else: - log.error('Missing pyproject.toml') - return False - - try: - with io.open(pyproject, encoding="utf-8") as f: - pyproject_data = 
toml_load(f) - # Ensure the mandatory data can be loaded - buildsys = pyproject_data['build-system'] - requires = buildsys['requires'] - backend = buildsys['build-backend'] - backend_path = buildsys.get('backend-path') - log.info('Loaded pyproject.toml') - except (TOMLDecodeError, KeyError): - log.error("Invalid pyproject.toml", exc_info=True) - return False - - hooks = Pep517HookCaller(source_dir, backend, backend_path) - - sdist_ok = check_build_sdist(hooks, requires) - wheel_ok = check_build_wheel(hooks, requires) - - if not sdist_ok: - log.warning('Sdist checks failed; scroll up to see') - if not wheel_ok: - log.warning('Wheel checks failed') - - return sdist_ok - - -def main(argv=None): - log.warning('pep517.check is deprecated. ' - 'Consider switching to https://pypi.org/project/build/') - - ap = argparse.ArgumentParser() - ap.add_argument( - 'source_dir', - help="A directory containing pyproject.toml") - args = ap.parse_args(argv) - - enable_colourful_output() - - ok = check(args.source_dir) - - if ok: - print(ansi('Checks passed', 'green')) - else: - print(ansi('Checks failed', 'red')) - sys.exit(1) - - -ansi_codes = { - 'reset': '\x1b[0m', - 'bold': '\x1b[1m', - 'red': '\x1b[31m', - 'green': '\x1b[32m', -} - - -def ansi(s, attr): - if os.name != 'nt' and sys.stdout.isatty(): - return ansi_codes[attr] + str(s) + ansi_codes['reset'] - else: - return str(s) - - -if __name__ == '__main__': - main() diff --git a/pipenv/vendor/pep517/colorlog.py b/pipenv/vendor/pep517/colorlog.py deleted file mode 100644 index 69c8a59d3d..0000000000 --- a/pipenv/vendor/pep517/colorlog.py +++ /dev/null @@ -1,115 +0,0 @@ -"""Nicer log formatting with colours. - -Code copied from Tornado, Apache licensed. -""" -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import sys - -try: - import curses -except ImportError: - curses = None - - -def _stderr_supports_color(): - color = False - if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): - try: - curses.setupterm() - if curses.tigetnum("colors") > 0: - color = True - except Exception: - pass - return color - - -class LogFormatter(logging.Formatter): - """Log formatter with colour support - """ - DEFAULT_COLORS = { - logging.INFO: 2, # Green - logging.WARNING: 3, # Yellow - logging.ERROR: 1, # Red - logging.CRITICAL: 1, - } - - def __init__(self, color=True, datefmt=None): - r""" - :arg bool color: Enables color support. - :arg string fmt: Log message format. - It will be applied to the attributes dict of log records. The - text between ``%(color)s`` and ``%(end_color)s`` will be colored - depending on the level if color support is on. - :arg dict colors: color mappings from logging level to terminal color - code - :arg string datefmt: Datetime format. - Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. - .. versionchanged:: 3.2 - Added ``fmt`` and ``datefmt`` arguments. 
- """ - logging.Formatter.__init__(self, datefmt=datefmt) - self._colors = {} - if color and _stderr_supports_color(): - # The curses module has some str/bytes confusion in - # python3. Until version 3.2.3, most methods return - # bytes, but only accept strings. In addition, we want to - # output these strings with the logging module, which - # works with unicode strings. The explicit calls to - # unicode() below are harmless in python2 but will do the - # right conversion in python 3. - fg_color = (curses.tigetstr("setaf") or - curses.tigetstr("setf") or "") - if (3, 0) < sys.version_info < (3, 2, 3): - fg_color = str(fg_color, "ascii") - - for levelno, code in self.DEFAULT_COLORS.items(): - self._colors[levelno] = str( - curses.tparm(fg_color, code), "ascii") - self._normal = str(curses.tigetstr("sgr0"), "ascii") - - scr = curses.initscr() - self.termwidth = scr.getmaxyx()[1] - curses.endwin() - else: - self._normal = '' - # Default width is usually 80, but too wide is - # worse than too narrow - self.termwidth = 70 - - def formatMessage(self, record): - mlen = len(record.message) - right_text = '{initial}-{name}'.format(initial=record.levelname[0], - name=record.name) - if mlen + len(right_text) < self.termwidth: - space = ' ' * (self.termwidth - (mlen + len(right_text))) - else: - space = ' ' - - if record.levelno in self._colors: - start_color = self._colors[record.levelno] - end_color = self._normal - else: - start_color = end_color = '' - - return record.message + space + start_color + right_text + end_color - - -def enable_colourful_output(level=logging.INFO): - handler = logging.StreamHandler() - handler.setFormatter(LogFormatter()) - logging.root.addHandler(handler) - logging.root.setLevel(level) diff --git a/pipenv/vendor/pep517/compat.py b/pipenv/vendor/pep517/compat.py deleted file mode 100644 index 900f48a28d..0000000000 --- a/pipenv/vendor/pep517/compat.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Python 2/3 compatibility""" -import json -import sys - - -# Handle reading and writing JSON in UTF-8, on Python 3 and 2. 
- -if sys.version_info[0] >= 3: - # Python 3 - def write_json(obj, path, **kwargs): - with open(path, 'w', encoding='utf-8') as f: - json.dump(obj, f, **kwargs) - - def read_json(path): - with open(path, 'r', encoding='utf-8') as f: - return json.load(f) - -else: - # Python 2 - def write_json(obj, path, **kwargs): - with open(path, 'wb') as f: - json.dump(obj, f, encoding='utf-8', **kwargs) - - def read_json(path): - with open(path, 'rb') as f: - return json.load(f) - - -# FileNotFoundError - -try: - FileNotFoundError = FileNotFoundError -except NameError: - FileNotFoundError = IOError - - -if sys.version_info < (3, 6): - from pipenv.vendor.toml import load as toml_load # noqa: F401 - from pipenv.vendor.toml import TomlDecodeError as TOMLDecodeError # noqa: F401 -else: - from pipenv.vendor.tomli import load as toml_load # noqa: F401 - from pipenv.vendor.tomli import TOMLDecodeError # noqa: F401 diff --git a/pipenv/vendor/pep517/dirtools.py b/pipenv/vendor/pep517/dirtools.py deleted file mode 100644 index 58c6ca0c56..0000000000 --- a/pipenv/vendor/pep517/dirtools.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import io -import contextlib -import tempfile -import shutil -import errno -import zipfile - - -@contextlib.contextmanager -def tempdir(): - """Create a temporary directory in a context manager.""" - td = tempfile.mkdtemp() - try: - yield td - finally: - shutil.rmtree(td) - - -def mkdir_p(*args, **kwargs): - """Like `mkdir`, but does not raise an exception if the - directory already exists. - """ - try: - return os.mkdir(*args, **kwargs) - except OSError as exc: - if exc.errno != errno.EEXIST: - raise - - -def dir_to_zipfile(root): - """Construct an in-memory zip file for a directory.""" - buffer = io.BytesIO() - zip_file = zipfile.ZipFile(buffer, 'w') - for root, dirs, files in os.walk(root): - for path in dirs: - fs_path = os.path.join(root, path) - rel_path = os.path.relpath(fs_path, root) - zip_file.writestr(rel_path + '/', '') - for path in files: - fs_path = os.path.join(root, path) - rel_path = os.path.relpath(fs_path, root) - zip_file.write(fs_path, rel_path) - return zip_file diff --git a/pipenv/vendor/pep517/envbuild.py b/pipenv/vendor/pep517/envbuild.py deleted file mode 100644 index 7c2344bf3b..0000000000 --- a/pipenv/vendor/pep517/envbuild.py +++ /dev/null @@ -1,171 +0,0 @@ -"""Build wheels/sdists by installing build deps to a temporary environment. -""" - -import io -import os -import logging -import shutil -from subprocess import check_call -import sys -from sysconfig import get_paths -from tempfile import mkdtemp - -from .compat import toml_load -from .wrappers import Pep517HookCaller, LoggerWrapper - -log = logging.getLogger(__name__) - - -def _load_pyproject(source_dir): - with io.open( - os.path.join(source_dir, 'pyproject.toml'), - encoding="utf-8", - ) as f: - pyproject_data = toml_load(f) - buildsys = pyproject_data['build-system'] - return ( - buildsys['requires'], - buildsys['build-backend'], - buildsys.get('backend-path'), - ) - - -class BuildEnvironment(object): - """Context manager to install build deps in a simple temporary environment - - Based on code I wrote for pip, which is MIT licensed. 
- """ - # Copyright (c) 2008-2016 The pip developers (see AUTHORS.txt file) - # - # Permission is hereby granted, free of charge, to any person obtaining - # a copy of this software and associated documentation files (the - # "Software"), to deal in the Software without restriction, including - # without limitation the rights to use, copy, modify, merge, publish, - # distribute, sublicense, and/or sell copies of the Software, and to - # permit persons to whom the Software is furnished to do so, subject to - # the following conditions: - # - # The above copyright notice and this permission notice shall be - # included in all copies or substantial portions of the Software. - # - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - path = None - - def __init__(self, cleanup=True): - self._cleanup = cleanup - - def __enter__(self): - self.path = mkdtemp(prefix='pep517-build-env-') - log.info('Temporary build environment: %s', self.path) - - self.save_path = os.environ.get('PATH', None) - self.save_pythonpath = os.environ.get('PYTHONPATH', None) - - install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' - install_dirs = get_paths(install_scheme, vars={ - 'base': self.path, - 'platbase': self.path, - }) - - scripts = install_dirs['scripts'] - if self.save_path: - os.environ['PATH'] = scripts + os.pathsep + self.save_path - else: - os.environ['PATH'] = scripts + os.pathsep + os.defpath - - if install_dirs['purelib'] == install_dirs['platlib']: - lib_dirs = install_dirs['purelib'] - else: - lib_dirs = install_dirs['purelib'] + os.pathsep + \ - install_dirs['platlib'] - if self.save_pythonpath: - os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ - self.save_pythonpath - else: - os.environ['PYTHONPATH'] = lib_dirs - - return self - - def pip_install(self, reqs): - """Install dependencies into this env by calling pip in a subprocess""" - if not reqs: - return - log.info('Calling pip to install %s', reqs) - cmd = [ - sys.executable, '-m', 'pip', 'install', '--ignore-installed', - '--prefix', self.path] + list(reqs) - check_call( - cmd, - stdout=LoggerWrapper(log, logging.INFO), - stderr=LoggerWrapper(log, logging.ERROR), - ) - - def __exit__(self, exc_type, exc_val, exc_tb): - needs_cleanup = ( - self._cleanup and - self.path is not None and - os.path.isdir(self.path) - ) - if needs_cleanup: - shutil.rmtree(self.path) - - if self.save_path is None: - os.environ.pop('PATH', None) - else: - os.environ['PATH'] = self.save_path - - if self.save_pythonpath is None: - os.environ.pop('PYTHONPATH', None) - else: - os.environ['PYTHONPATH'] = self.save_pythonpath - - -def build_wheel(source_dir, wheel_dir, config_settings=None): - """Build a wheel from a source directory using PEP 517 hooks. - - :param str source_dir: Source directory containing pyproject.toml - :param str wheel_dir: Target directory to create wheel in - :param dict config_settings: Options to pass to build backend - - This is a blocking function which will run pip in a subprocess to install - build requirements. 
- """ - if config_settings is None: - config_settings = {} - requires, backend, backend_path = _load_pyproject(source_dir) - hooks = Pep517HookCaller(source_dir, backend, backend_path) - - with BuildEnvironment() as env: - env.pip_install(requires) - reqs = hooks.get_requires_for_build_wheel(config_settings) - env.pip_install(reqs) - return hooks.build_wheel(wheel_dir, config_settings) - - -def build_sdist(source_dir, sdist_dir, config_settings=None): - """Build an sdist from a source directory using PEP 517 hooks. - - :param str source_dir: Source directory containing pyproject.toml - :param str sdist_dir: Target directory to place sdist in - :param dict config_settings: Options to pass to build backend - - This is a blocking function which will run pip in a subprocess to install - build requirements. - """ - if config_settings is None: - config_settings = {} - requires, backend, backend_path = _load_pyproject(source_dir) - hooks = Pep517HookCaller(source_dir, backend, backend_path) - - with BuildEnvironment() as env: - env.pip_install(requires) - reqs = hooks.get_requires_for_build_sdist(config_settings) - env.pip_install(reqs) - return hooks.build_sdist(sdist_dir, config_settings) diff --git a/pipenv/vendor/pep517/in_process/__init__.py b/pipenv/vendor/pep517/in_process/__init__.py deleted file mode 100644 index c932313b32..0000000000 --- a/pipenv/vendor/pep517/in_process/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -"""This is a subpackage because the directory is on sys.path for _in_process.py - -The subpackage should stay as empty as possible to avoid shadowing modules that -the backend might import. -""" -from os.path import dirname, abspath, join as pjoin -from contextlib import contextmanager - -try: - import importlib.resources as resources - - def _in_proc_script_path(): - return resources.path(__package__, '_in_process.py') -except ImportError: - @contextmanager - def _in_proc_script_path(): - yield pjoin(dirname(abspath(__file__)), '_in_process.py') diff --git a/pipenv/vendor/pep517/in_process/_in_process.py b/pipenv/vendor/pep517/in_process/_in_process.py deleted file mode 100644 index c7f5f0577f..0000000000 --- a/pipenv/vendor/pep517/in_process/_in_process.py +++ /dev/null @@ -1,349 +0,0 @@ -"""This is invoked in a subprocess to call the build backend hooks. - -It expects: -- Command line args: hook_name, control_dir -- Environment variables: - PEP517_BUILD_BACKEND=entry.point:spec - PEP517_BACKEND_PATH=paths (separated with os.pathsep) -- control_dir/input.json: - - {"kwargs": {...}} - -Results: -- control_dir/output.json - - {"return_val": ...} -""" -from glob import glob -from importlib import import_module -import json -import os -import os.path -from os.path import join as pjoin -import re -import shutil -import sys -import traceback - -# This file is run as a script, and `import compat` is not zip-safe, so we -# include write_json() and read_json() from compat.py. -# -# Handle reading and writing JSON in UTF-8, on Python 3 and 2. 
- -if sys.version_info[0] >= 3: - # Python 3 - def write_json(obj, path, **kwargs): - with open(path, 'w', encoding='utf-8') as f: - json.dump(obj, f, **kwargs) - - def read_json(path): - with open(path, 'r', encoding='utf-8') as f: - return json.load(f) - -else: - # Python 2 - def write_json(obj, path, **kwargs): - with open(path, 'wb') as f: - json.dump(obj, f, encoding='utf-8', **kwargs) - - def read_json(path): - with open(path, 'rb') as f: - return json.load(f) - - -class BackendUnavailable(Exception): - """Raised if we cannot import the backend""" - def __init__(self, traceback): - self.traceback = traceback - - -class BackendInvalid(Exception): - """Raised if the backend is invalid""" - def __init__(self, message): - self.message = message - - -class HookMissing(Exception): - """Raised if a hook is missing and we are not executing the fallback""" - def __init__(self, hook_name=None): - super(HookMissing, self).__init__(hook_name) - self.hook_name = hook_name - - -def contained_in(filename, directory): - """Test if a file is located within the given directory.""" - filename = os.path.normcase(os.path.abspath(filename)) - directory = os.path.normcase(os.path.abspath(directory)) - return os.path.commonprefix([filename, directory]) == directory - - -def _build_backend(): - """Find and load the build backend""" - # Add in-tree backend directories to the front of sys.path. - backend_path = os.environ.get('PEP517_BACKEND_PATH') - if backend_path: - extra_pathitems = backend_path.split(os.pathsep) - sys.path[:0] = extra_pathitems - - ep = os.environ['PEP517_BUILD_BACKEND'] - mod_path, _, obj_path = ep.partition(':') - try: - obj = import_module(mod_path) - except ImportError: - raise BackendUnavailable(traceback.format_exc()) - - if backend_path: - if not any( - contained_in(obj.__file__, path) - for path in extra_pathitems - ): - raise BackendInvalid("Backend was not loaded from backend-path") - - if obj_path: - for path_part in obj_path.split('.'): - obj = getattr(obj, path_part) - return obj - - -def get_requires_for_build_wheel(config_settings): - """Invoke the optional get_requires_for_build_wheel hook - - Returns [] if the hook is not defined. - """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_wheel - except AttributeError: - return [] - else: - return hook(config_settings) - - -def get_requires_for_build_editable(config_settings): - """Invoke the optional get_requires_for_build_editable hook - - Returns [] if the hook is not defined. - """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_editable - except AttributeError: - return [] - else: - return hook(config_settings) - - -def prepare_metadata_for_build_wheel( - metadata_directory, config_settings, _allow_fallback): - """Invoke optional prepare_metadata_for_build_wheel - - Implements a fallback by building a wheel if the hook isn't defined, - unless _allow_fallback is False in which case HookMissing is raised. 
- """ - backend = _build_backend() - try: - hook = backend.prepare_metadata_for_build_wheel - except AttributeError: - if not _allow_fallback: - raise HookMissing() - whl_basename = backend.build_wheel(metadata_directory, config_settings) - return _get_wheel_metadata_from_wheel(whl_basename, metadata_directory, - config_settings) - else: - return hook(metadata_directory, config_settings) - - -def prepare_metadata_for_build_editable( - metadata_directory, config_settings, _allow_fallback): - """Invoke optional prepare_metadata_for_build_editable - - Implements a fallback by building an editable wheel if the hook isn't - defined, unless _allow_fallback is False in which case HookMissing is - raised. - """ - backend = _build_backend() - try: - hook = backend.prepare_metadata_for_build_editable - except AttributeError: - if not _allow_fallback: - raise HookMissing() - try: - build_hook = backend.build_editable - except AttributeError: - raise HookMissing(hook_name='build_editable') - else: - whl_basename = build_hook(metadata_directory, config_settings) - return _get_wheel_metadata_from_wheel(whl_basename, - metadata_directory, - config_settings) - else: - return hook(metadata_directory, config_settings) - - -WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL' - - -def _dist_info_files(whl_zip): - """Identify the .dist-info folder inside a wheel ZipFile.""" - res = [] - for path in whl_zip.namelist(): - m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path) - if m: - res.append(path) - if res: - return res - raise Exception("No .dist-info folder found in wheel") - - -def _get_wheel_metadata_from_wheel( - whl_basename, metadata_directory, config_settings): - """Extract the metadata from a wheel. - - Fallback for when the build backend does not - define the 'get_wheel_metadata' hook. - """ - from zipfile import ZipFile - with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'): - pass # Touch marker file - - whl_file = os.path.join(metadata_directory, whl_basename) - with ZipFile(whl_file) as zipf: - dist_info = _dist_info_files(zipf) - zipf.extractall(path=metadata_directory, members=dist_info) - return dist_info[0].split('/')[0] - - -def _find_already_built_wheel(metadata_directory): - """Check for a wheel already built during the get_wheel_metadata hook. - """ - if not metadata_directory: - return None - metadata_parent = os.path.dirname(metadata_directory) - if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)): - return None - - whl_files = glob(os.path.join(metadata_parent, '*.whl')) - if not whl_files: - print('Found wheel built marker, but no .whl files') - return None - if len(whl_files) > 1: - print('Found multiple .whl files; unspecified behaviour. ' - 'Will call build_wheel.') - return None - - # Exactly one .whl file - return whl_files[0] - - -def build_wheel(wheel_directory, config_settings, metadata_directory=None): - """Invoke the mandatory build_wheel hook. - - If a wheel was already built in the - prepare_metadata_for_build_wheel fallback, this - will copy it rather than rebuilding the wheel. - """ - prebuilt_whl = _find_already_built_wheel(metadata_directory) - if prebuilt_whl: - shutil.copy2(prebuilt_whl, wheel_directory) - return os.path.basename(prebuilt_whl) - - return _build_backend().build_wheel(wheel_directory, config_settings, - metadata_directory) - - -def build_editable(wheel_directory, config_settings, metadata_directory=None): - """Invoke the optional build_editable hook. 
- - If a wheel was already built in the - prepare_metadata_for_build_editable fallback, this - will copy it rather than rebuilding the wheel. - """ - backend = _build_backend() - try: - hook = backend.build_editable - except AttributeError: - raise HookMissing() - else: - prebuilt_whl = _find_already_built_wheel(metadata_directory) - if prebuilt_whl: - shutil.copy2(prebuilt_whl, wheel_directory) - return os.path.basename(prebuilt_whl) - - return hook(wheel_directory, config_settings, metadata_directory) - - -def get_requires_for_build_sdist(config_settings): - """Invoke the optional get_requires_for_build_wheel hook - - Returns [] if the hook is not defined. - """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_sdist - except AttributeError: - return [] - else: - return hook(config_settings) - - -class _DummyException(Exception): - """Nothing should ever raise this exception""" - - -class GotUnsupportedOperation(Exception): - """For internal use when backend raises UnsupportedOperation""" - def __init__(self, traceback): - self.traceback = traceback - - -def build_sdist(sdist_directory, config_settings): - """Invoke the mandatory build_sdist hook.""" - backend = _build_backend() - try: - return backend.build_sdist(sdist_directory, config_settings) - except getattr(backend, 'UnsupportedOperation', _DummyException): - raise GotUnsupportedOperation(traceback.format_exc()) - - -HOOK_NAMES = { - 'get_requires_for_build_wheel', - 'prepare_metadata_for_build_wheel', - 'build_wheel', - 'get_requires_for_build_editable', - 'prepare_metadata_for_build_editable', - 'build_editable', - 'get_requires_for_build_sdist', - 'build_sdist', -} - - -def main(): - if len(sys.argv) < 3: - sys.exit("Needs args: hook_name, control_dir") - hook_name = sys.argv[1] - control_dir = sys.argv[2] - if hook_name not in HOOK_NAMES: - sys.exit("Unknown hook: %s" % hook_name) - hook = globals()[hook_name] - - hook_input = read_json(pjoin(control_dir, 'input.json')) - - json_out = {'unsupported': False, 'return_val': None} - try: - json_out['return_val'] = hook(**hook_input['kwargs']) - except BackendUnavailable as e: - json_out['no_backend'] = True - json_out['traceback'] = e.traceback - except BackendInvalid as e: - json_out['backend_invalid'] = True - json_out['backend_error'] = e.message - except GotUnsupportedOperation as e: - json_out['unsupported'] = True - json_out['traceback'] = e.traceback - except HookMissing as e: - json_out['hook_missing'] = True - json_out['missing_hook_name'] = e.hook_name or hook_name - - write_json(json_out, pjoin(control_dir, 'output.json'), indent=2) - - -if __name__ == '__main__': - main() diff --git a/pipenv/vendor/pep517/meta.py b/pipenv/vendor/pep517/meta.py deleted file mode 100644 index d9dba48710..0000000000 --- a/pipenv/vendor/pep517/meta.py +++ /dev/null @@ -1,92 +0,0 @@ -"""Build metadata for a project using PEP 517 hooks. 
-""" -import argparse -import logging -import os -import shutil -import functools - -try: - import importlib.metadata as imp_meta -except ImportError: - import importlib_metadata as imp_meta - -try: - from zipfile import Path -except ImportError: - from pipenv.vendor.zipp import Path - -from .envbuild import BuildEnvironment -from .wrappers import Pep517HookCaller, quiet_subprocess_runner -from .dirtools import tempdir, mkdir_p, dir_to_zipfile -from .build import validate_system, load_system, compat_system - -log = logging.getLogger(__name__) - - -def _prep_meta(hooks, env, dest): - reqs = hooks.get_requires_for_build_wheel({}) - log.info('Got build requires: %s', reqs) - - env.pip_install(reqs) - log.info('Installed dynamic build dependencies') - - with tempdir() as td: - log.info('Trying to build metadata in %s', td) - filename = hooks.prepare_metadata_for_build_wheel(td, {}) - source = os.path.join(td, filename) - shutil.move(source, os.path.join(dest, os.path.basename(filename))) - - -def build(source_dir='.', dest=None, system=None): - system = system or load_system(source_dir) - dest = os.path.join(source_dir, dest or 'dist') - mkdir_p(dest) - validate_system(system) - hooks = Pep517HookCaller( - source_dir, system['build-backend'], system.get('backend-path') - ) - - with hooks.subprocess_runner(quiet_subprocess_runner): - with BuildEnvironment() as env: - env.pip_install(system['requires']) - _prep_meta(hooks, env, dest) - - -def build_as_zip(builder=build): - with tempdir() as out_dir: - builder(dest=out_dir) - return dir_to_zipfile(out_dir) - - -def load(root): - """ - Given a source directory (root) of a package, - return an importlib.metadata.Distribution object - with metadata build from that package. - """ - root = os.path.expanduser(root) - system = compat_system(root) - builder = functools.partial(build, source_dir=root, system=system) - path = Path(build_as_zip(builder)) - return imp_meta.PathDistribution(path) - - -parser = argparse.ArgumentParser() -parser.add_argument( - 'source_dir', - help="A directory containing pyproject.toml", -) -parser.add_argument( - '--out-dir', '-o', - help="Destination in which to save the builds relative to source dir", -) - - -def main(): - args = parser.parse_args() - build(args.source_dir, args.out_dir) - - -if __name__ == '__main__': - main() diff --git a/pipenv/vendor/pep517/wrappers.py b/pipenv/vendor/pep517/wrappers.py deleted file mode 100644 index 52da22e825..0000000000 --- a/pipenv/vendor/pep517/wrappers.py +++ /dev/null @@ -1,371 +0,0 @@ -import threading -from contextlib import contextmanager -import os -from os.path import abspath, join as pjoin -import shutil -from subprocess import check_call, check_output, STDOUT -import sys -from tempfile import mkdtemp - -from . 
import compat -from .in_process import _in_proc_script_path - -__all__ = [ - 'BackendUnavailable', - 'BackendInvalid', - 'HookMissing', - 'UnsupportedOperation', - 'default_subprocess_runner', - 'quiet_subprocess_runner', - 'Pep517HookCaller', -] - - -@contextmanager -def tempdir(): - td = mkdtemp() - try: - yield td - finally: - shutil.rmtree(td) - - -class BackendUnavailable(Exception): - """Will be raised if the backend cannot be imported in the hook process.""" - def __init__(self, traceback): - self.traceback = traceback - - -class BackendInvalid(Exception): - """Will be raised if the backend is invalid.""" - def __init__(self, backend_name, backend_path, message): - self.backend_name = backend_name - self.backend_path = backend_path - self.message = message - - -class HookMissing(Exception): - """Will be raised on missing hooks.""" - def __init__(self, hook_name): - super(HookMissing, self).__init__(hook_name) - self.hook_name = hook_name - - -class UnsupportedOperation(Exception): - """May be raised by build_sdist if the backend indicates that it can't.""" - def __init__(self, traceback): - self.traceback = traceback - - -def default_subprocess_runner(cmd, cwd=None, extra_environ=None): - """The default method of calling the wrapper subprocess.""" - env = os.environ.copy() - if extra_environ: - env.update(extra_environ) - - check_call(cmd, cwd=cwd, env=env) - - -def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None): - """A method of calling the wrapper subprocess while suppressing output.""" - env = os.environ.copy() - if extra_environ: - env.update(extra_environ) - - check_output(cmd, cwd=cwd, env=env, stderr=STDOUT) - - -def norm_and_check(source_tree, requested): - """Normalise and check a backend path. - - Ensure that the requested backend path is specified as a relative path, - and resolves to a location under the given source tree. - - Return an absolute version of the requested path. - """ - if os.path.isabs(requested): - raise ValueError("paths must be relative") - - abs_source = os.path.abspath(source_tree) - abs_requested = os.path.normpath(os.path.join(abs_source, requested)) - # We have to use commonprefix for Python 2.7 compatibility. So we - # normalise case to avoid problems because commonprefix is a character - # based comparison :-( - norm_source = os.path.normcase(abs_source) - norm_requested = os.path.normcase(abs_requested) - if os.path.commonprefix([norm_source, norm_requested]) != norm_source: - raise ValueError("paths must be inside source tree") - - return abs_requested - - -class Pep517HookCaller(object): - """A wrapper around a source directory to be built with a PEP 517 backend. - - :param source_dir: The path to the source directory, containing - pyproject.toml. - :param build_backend: The build backend spec, as per PEP 517, from - pyproject.toml. - :param backend_path: The backend path, as per PEP 517, from pyproject.toml. - :param runner: A callable that invokes the wrapper subprocess. - :param python_executable: The Python executable used to invoke the backend - - The 'runner', if provided, must expect the following: - - - cmd: a list of strings representing the command and arguments to - execute, as would be passed to e.g. 'subprocess.check_call'. - - cwd: a string representing the working directory that must be - used for the subprocess. Corresponds to the provided source_dir. - - extra_environ: a dict mapping environment variable names to values - which must be set for the subprocess execution. 
- """ - def __init__( - self, - source_dir, - build_backend, - backend_path=None, - runner=None, - python_executable=None, - ): - if runner is None: - runner = default_subprocess_runner - - self.source_dir = abspath(source_dir) - self.build_backend = build_backend - if backend_path: - backend_path = [ - norm_and_check(self.source_dir, p) for p in backend_path - ] - self.backend_path = backend_path - self._subprocess_runner = runner - if not python_executable: - python_executable = sys.executable - self.python_executable = python_executable - - @contextmanager - def subprocess_runner(self, runner): - """A context manager for temporarily overriding the default subprocess - runner. - """ - prev = self._subprocess_runner - self._subprocess_runner = runner - try: - yield - finally: - self._subprocess_runner = prev - - def get_requires_for_build_wheel(self, config_settings=None): - """Identify packages required for building a wheel - - Returns a list of dependency specifications, e.g.:: - - ["wheel >= 0.25", "setuptools"] - - This does not include requirements specified in pyproject.toml. - It returns the result of calling the equivalently named hook in a - subprocess. - """ - return self._call_hook('get_requires_for_build_wheel', { - 'config_settings': config_settings - }) - - def prepare_metadata_for_build_wheel( - self, metadata_directory, config_settings=None, - _allow_fallback=True): - """Prepare a ``*.dist-info`` folder with metadata for this project. - - Returns the name of the newly created folder. - - If the build backend defines a hook with this name, it will be called - in a subprocess. If not, the backend will be asked to build a wheel, - and the dist-info extracted from that (unless _allow_fallback is - False). - """ - return self._call_hook('prepare_metadata_for_build_wheel', { - 'metadata_directory': abspath(metadata_directory), - 'config_settings': config_settings, - '_allow_fallback': _allow_fallback, - }) - - def build_wheel( - self, wheel_directory, config_settings=None, - metadata_directory=None): - """Build a wheel from this project. - - Returns the name of the newly created file. - - In general, this will call the 'build_wheel' hook in the backend. - However, if that was previously called by - 'prepare_metadata_for_build_wheel', and the same metadata_directory is - used, the previously built wheel will be copied to wheel_directory. - """ - if metadata_directory is not None: - metadata_directory = abspath(metadata_directory) - return self._call_hook('build_wheel', { - 'wheel_directory': abspath(wheel_directory), - 'config_settings': config_settings, - 'metadata_directory': metadata_directory, - }) - - def get_requires_for_build_editable(self, config_settings=None): - """Identify packages required for building an editable wheel - - Returns a list of dependency specifications, e.g.:: - - ["wheel >= 0.25", "setuptools"] - - This does not include requirements specified in pyproject.toml. - It returns the result of calling the equivalently named hook in a - subprocess. - """ - return self._call_hook('get_requires_for_build_editable', { - 'config_settings': config_settings - }) - - def prepare_metadata_for_build_editable( - self, metadata_directory, config_settings=None, - _allow_fallback=True): - """Prepare a ``*.dist-info`` folder with metadata for this project. - - Returns the name of the newly created folder. - - If the build backend defines a hook with this name, it will be called - in a subprocess. 
If not, the backend will be asked to build an editable - wheel, and the dist-info extracted from that (unless _allow_fallback is - False). - """ - return self._call_hook('prepare_metadata_for_build_editable', { - 'metadata_directory': abspath(metadata_directory), - 'config_settings': config_settings, - '_allow_fallback': _allow_fallback, - }) - - def build_editable( - self, wheel_directory, config_settings=None, - metadata_directory=None): - """Build an editable wheel from this project. - - Returns the name of the newly created file. - - In general, this will call the 'build_editable' hook in the backend. - However, if that was previously called by - 'prepare_metadata_for_build_editable', and the same metadata_directory - is used, the previously built wheel will be copied to wheel_directory. - """ - if metadata_directory is not None: - metadata_directory = abspath(metadata_directory) - return self._call_hook('build_editable', { - 'wheel_directory': abspath(wheel_directory), - 'config_settings': config_settings, - 'metadata_directory': metadata_directory, - }) - - def get_requires_for_build_sdist(self, config_settings=None): - """Identify packages required for building a wheel - - Returns a list of dependency specifications, e.g.:: - - ["setuptools >= 26"] - - This does not include requirements specified in pyproject.toml. - It returns the result of calling the equivalently named hook in a - subprocess. - """ - return self._call_hook('get_requires_for_build_sdist', { - 'config_settings': config_settings - }) - - def build_sdist(self, sdist_directory, config_settings=None): - """Build an sdist from this project. - - Returns the name of the newly created file. - - This calls the 'build_sdist' backend hook in a subprocess. - """ - return self._call_hook('build_sdist', { - 'sdist_directory': abspath(sdist_directory), - 'config_settings': config_settings, - }) - - def _call_hook(self, hook_name, kwargs): - # On Python 2, pytoml returns Unicode values (which is correct) but the - # environment passed to check_call needs to contain string values. We - # convert here by encoding using ASCII (the backend can only contain - # letters, digits and _, . and : characters, and will be used as a - # Python identifier, so non-ASCII content is wrong on Python 2 in - # any case). - # For backend_path, we use sys.getfilesystemencoding. 
- if sys.version_info[0] == 2: - build_backend = self.build_backend.encode('ASCII') - else: - build_backend = self.build_backend - extra_environ = {'PEP517_BUILD_BACKEND': build_backend} - - if self.backend_path: - backend_path = os.pathsep.join(self.backend_path) - if sys.version_info[0] == 2: - backend_path = backend_path.encode(sys.getfilesystemencoding()) - extra_environ['PEP517_BACKEND_PATH'] = backend_path - - with tempdir() as td: - hook_input = {'kwargs': kwargs} - compat.write_json(hook_input, pjoin(td, 'input.json'), - indent=2) - - # Run the hook in a subprocess - with _in_proc_script_path() as script: - python = self.python_executable - self._subprocess_runner( - [python, abspath(str(script)), hook_name, td], - cwd=self.source_dir, - extra_environ=extra_environ - ) - - data = compat.read_json(pjoin(td, 'output.json')) - if data.get('unsupported'): - raise UnsupportedOperation(data.get('traceback', '')) - if data.get('no_backend'): - raise BackendUnavailable(data.get('traceback', '')) - if data.get('backend_invalid'): - raise BackendInvalid( - backend_name=self.build_backend, - backend_path=self.backend_path, - message=data.get('backend_error', '') - ) - if data.get('hook_missing'): - raise HookMissing(data.get('missing_hook_name') or hook_name) - return data['return_val'] - - -class LoggerWrapper(threading.Thread): - """ - Read messages from a pipe and redirect them - to a logger (see python's logging module). - """ - - def __init__(self, logger, level): - threading.Thread.__init__(self) - self.daemon = True - - self.logger = logger - self.level = level - - # create the pipe and reader - self.fd_read, self.fd_write = os.pipe() - self.reader = os.fdopen(self.fd_read) - - self.start() - - def fileno(self): - return self.fd_write - - @staticmethod - def remove_newline(msg): - return msg[:-1] if msg.endswith(os.linesep) else msg - - def run(self): - for line in self.reader: - self._write(self.remove_newline(line)) - - def _write(self, message): - self.logger.log(self.level, message) diff --git a/pipenv/vendor/requirementslib/models/setup_info.py b/pipenv/vendor/requirementslib/models/setup_info.py index 69de266e28..5a4c30a8a4 100644 --- a/pipenv/vendor/requirementslib/models/setup_info.py +++ b/pipenv/vendor/requirementslib/models/setup_info.py @@ -17,8 +17,8 @@ from weakref import finalize import pipenv.vendor.attr as attr -import pep517.envbuild -import pep517.wrappers +from pipenv.patched.pip._vendor.pep517 import envbuild +from pipenv.patched.pip._vendor.pep517 import wrappers from pipenv.vendor.distlib.wheel import Wheel from pipenv.patched.pip._vendor.packaging.markers import Marker from pipenv.patched.pip._vendor.packaging.specifiers import SpecifierSet @@ -114,7 +114,7 @@ def pep517_subprocess_runner(cmd, cwd=None, extra_environ=None): ) -class BuildEnv(pep517.envbuild.BuildEnvironment): +class BuildEnv(envbuild.BuildEnvironment): def pip_install(self, reqs): cmd = [ sys.executable, @@ -135,7 +135,7 @@ def pip_install(self, reqs): ) -class HookCaller(pep517.wrappers.Pep517HookCaller): +class HookCaller(wrappers.Pep517HookCaller): def __init__(self, source_dir, build_backend, backend_path=None): super().__init__(source_dir, build_backend, backend_path=backend_path) self.source_dir = os.path.abspath(source_dir) @@ -143,7 +143,7 @@ def __init__(self, source_dir, build_backend, backend_path=None): self._subprocess_runner = pep517_subprocess_runner if backend_path: backend_path = [ - pep517.wrappers.norm_and_check(self.source_dir, p) for p in backend_path + 
wrappers.norm_and_check(self.source_dir, p) for p in backend_path ] self.backend_path = backend_path diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index 3cba6133ab..d01761df3c 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -12,7 +12,6 @@ iso8601==0.1.16 markupsafe==2.0.1 orderedmultidict==1.0.1 parse==1.19.0 -pep517==0.11.0 pexpect==4.8.0 pip-shims==0.7.3 pipdeptree==2.2.1 diff --git a/tasks/vendoring/__init__.py b/tasks/vendoring/__init__.py index 1eda7ea147..d93ac782fc 100644 --- a/tasks/vendoring/__init__.py +++ b/tasks/vendoring/__init__.py @@ -68,6 +68,16 @@ (r"(? Date: Sun, 7 Aug 2022 18:09:17 -0400 Subject: [PATCH 035/200] Vendoring in pip 22.2.2 --- pipenv/patched/patched.txt | 2 +- pipenv/patched/pip/__init__.py | 2 +- pipenv/patched/pip/__pip-runner__.py | 1 - pipenv/patched/pip/_internal/index/collector.py | 1 + pipenv/patched/pip/_internal/locations/__init__.py | 14 ++++++-------- .../patched/pip/_internal/locations/_distutils.py | 2 +- pipenv/patched/pip/_internal/utils/deprecation.py | 2 +- 7 files changed, 11 insertions(+), 13 deletions(-) diff --git a/pipenv/patched/patched.txt b/pipenv/patched/patched.txt index 1eab502e59..8ea0653352 100644 --- a/pipenv/patched/patched.txt +++ b/pipenv/patched/patched.txt @@ -1,3 +1,3 @@ -pip==22.2.1 +pip==22.2.2 pipfile==0.0.2 safety==2.1.1 diff --git a/pipenv/patched/pip/__init__.py b/pipenv/patched/pip/__init__.py index 87bff02da6..1a7e43e182 100644 --- a/pipenv/patched/pip/__init__.py +++ b/pipenv/patched/pip/__init__.py @@ -1,6 +1,6 @@ from typing import List, Optional -__version__ = "22.2.1" +__version__ = "22.2.2" def main(args: Optional[List[str]] = None) -> int: diff --git a/pipenv/patched/pip/__pip-runner__.py b/pipenv/patched/pip/__pip-runner__.py index 280e99f2f0..14026c0d13 100644 --- a/pipenv/patched/pip/__pip-runner__.py +++ b/pipenv/patched/pip/__pip-runner__.py @@ -30,7 +30,6 @@ def find_spec( return spec -# TODO https://github.com/pypa/pip/issues/11294 sys.meta_path.insert(0, PipImportRedirectingFinder()) assert __name__ == "__main__", "Cannot run __pip-runner__.py as a non-main module" diff --git a/pipenv/patched/pip/_internal/index/collector.py b/pipenv/patched/pip/_internal/index/collector.py index 280edc3414..596a9d6953 100644 --- a/pipenv/patched/pip/_internal/index/collector.py +++ b/pipenv/patched/pip/_internal/index/collector.py @@ -345,6 +345,7 @@ def parse_links(page: "IndexContent") -> Iterable[Link]: yanked_reason=yanked_reason, hashes=file.get("hashes", {}), ) + return parser = HTMLLinkParser(page.url) encoding = page.encoding or "utf-8" diff --git a/pipenv/patched/pip/_internal/locations/__init__.py b/pipenv/patched/pip/_internal/locations/__init__.py index c37d55f7a6..23eaea6423 100644 --- a/pipenv/patched/pip/_internal/locations/__init__.py +++ b/pipenv/patched/pip/_internal/locations/__init__.py @@ -60,6 +60,12 @@ def _should_use_sysconfig() -> bool: _USE_SYSCONFIG = _should_use_sysconfig() +if not _USE_SYSCONFIG: + # Import distutils lazily to avoid deprecation warnings, + # but import it soon enough that it is in memory and available during + # a pip reinstall. + from . import _distutils + # Be noisy about incompatibilities if this platforms "should" be using # sysconfig, but is explicitly opting out and using distutils instead. if _USE_SYSCONFIG_DEFAULT and not _USE_SYSCONFIG: @@ -241,8 +247,6 @@ def get_scheme( if _USE_SYSCONFIG: return new - from . 
import _distutils
-
     old = _distutils.get_scheme(
         dist_name,
         user=user,
@@ -407,8 +411,6 @@ def get_bin_prefix() -> str:
     if _USE_SYSCONFIG:
         return new

-    from . import _distutils
-
     old = _distutils.get_bin_prefix()
     if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="bin_prefix"):
         _log_context()
@@ -442,8 +444,6 @@ def get_purelib() -> str:
     if _USE_SYSCONFIG:
         return new

-    from . import _distutils
-
     old = _distutils.get_purelib()
     if _looks_like_deb_system_dist_packages(old):
         return old
@@ -488,8 +488,6 @@ def get_prefixed_libs(prefix: str) -> List[str]:
     if _USE_SYSCONFIG:
         return _deduplicated(new_pure, new_plat)

-    from . import _distutils
-
     old_pure, old_plat = _distutils.get_prefixed_libs(prefix)
     old_lib_paths = _deduplicated(old_pure, old_plat)
diff --git a/pipenv/patched/pip/_internal/locations/_distutils.py b/pipenv/patched/pip/_internal/locations/_distutils.py
index b2d5da00f0..a708e145ce 100644
--- a/pipenv/patched/pip/_internal/locations/_distutils.py
+++ b/pipenv/patched/pip/_internal/locations/_distutils.py
@@ -11,7 +11,7 @@
 # rationale for why this is done within pip.
 try:
     __import__("_distutils_hack").remove_shim()
-except ImportError:
+except (ImportError, AttributeError):
     pass

 import logging
diff --git a/pipenv/patched/pip/_internal/utils/deprecation.py b/pipenv/patched/pip/_internal/utils/deprecation.py
index a70d491042..fc4424117a 100644
--- a/pipenv/patched/pip/_internal/utils/deprecation.py
+++ b/pipenv/patched/pip/_internal/utils/deprecation.py
@@ -13,7 +13,7 @@
 DEPRECATION_MSG_PREFIX = "DEPRECATION: "

-class PipDeprecationWarning(DeprecationWarning):
+class PipDeprecationWarning(Warning):
     pass

From 61aed95cc6bc849b2d6179cdd24de813497e294e Mon Sep 17 00:00:00 2001
From: Matt Davis
Date: Sun, 7 Aug 2022 18:13:13 -0400
Subject: [PATCH 036/200] Add news fragment.

---
 news/5230.vendor.rst | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 news/5230.vendor.rst

diff --git a/news/5230.vendor.rst b/news/5230.vendor.rst
new file mode 100644
index 0000000000..6994c6b117
--- /dev/null
+++ b/news/5230.vendor.rst
@@ -0,0 +1 @@
+Vendor in minor ``pip`` update ``22.2.2``

From 6ce90365a2e3b17e4b38a1e73bbd878d6e40007c Mon Sep 17 00:00:00 2001
From: Oz N Tiram
Date: Mon, 8 Aug 2022 23:00:53 +0200
Subject: [PATCH 037/200] Drop zipp from vendored libs.

Zipp was introduced as a dependency of pep517. However, we are now using
pep517 from `pip._vendor`. The version included there isn't using any code
path that requires zipp. Hence, it is excluded from `pip._vendor`, and can
be safely removed.

---
 pipenv/vendor/vendor.txt   |   1 -
 pipenv/vendor/zipp.LICENSE |  19 ---
 pipenv/vendor/zipp.py      | 326 -------------------------------------
 3 files changed, 346 deletions(-)
 delete mode 100644 pipenv/vendor/zipp.LICENSE
 delete mode 100644 pipenv/vendor/zipp.py

diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt
index d01761df3c..a0b4812ec6 100644
--- a/pipenv/vendor/vendor.txt
+++ b/pipenv/vendor/vendor.txt
@@ -33,4 +33,3 @@ tomlkit==0.9.2
 vistir==0.5.6
 wheel==0.36.2
 yaspin==2.0.0
-zipp==3.5.0
diff --git a/pipenv/vendor/zipp.LICENSE b/pipenv/vendor/zipp.LICENSE
deleted file mode 100644
index 353924be0e..0000000000
--- a/pipenv/vendor/zipp.LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright Jason R.
Coombs - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. diff --git a/pipenv/vendor/zipp.py b/pipenv/vendor/zipp.py deleted file mode 100644 index 69cdaad4a9..0000000000 --- a/pipenv/vendor/zipp.py +++ /dev/null @@ -1,326 +0,0 @@ -import io -import posixpath -import zipfile -import itertools -import contextlib -import sys -import pathlib - -if sys.version_info < (3, 7): - from collections import OrderedDict -else: - OrderedDict = dict - - -def _parents(path): - """ - Given a path with elements separated by - posixpath.sep, generate all parents of that path. - - >>> list(_parents('b/d')) - ['b'] - >>> list(_parents('/b/d/')) - ['/b'] - >>> list(_parents('b/d/f/')) - ['b/d', 'b'] - >>> list(_parents('b')) - [] - >>> list(_parents('')) - [] - """ - return itertools.islice(_ancestry(path), 1, None) - - -def _ancestry(path): - """ - Given a path with elements separated by - posixpath.sep, generate all elements of that path - - >>> list(_ancestry('b/d')) - ['b/d', 'b'] - >>> list(_ancestry('/b/d/')) - ['/b/d', '/b'] - >>> list(_ancestry('b/d/f/')) - ['b/d/f', 'b/d', 'b'] - >>> list(_ancestry('b')) - ['b'] - >>> list(_ancestry('')) - [] - """ - path = path.rstrip(posixpath.sep) - while path and path != posixpath.sep: - yield path - path, tail = posixpath.split(path) - - -_dedupe = OrderedDict.fromkeys -"""Deduplicate an iterable in original order""" - - -def _difference(minuend, subtrahend): - """ - Return items in minuend not in subtrahend, retaining order - with O(1) lookup. - """ - return itertools.filterfalse(set(subtrahend).__contains__, minuend) - - -class CompleteDirs(zipfile.ZipFile): - """ - A ZipFile subclass that ensures that implied directories - are always included in the namelist. - """ - - @staticmethod - def _implied_dirs(names): - parents = itertools.chain.from_iterable(map(_parents, names)) - as_dirs = (p + posixpath.sep for p in parents) - return _dedupe(_difference(as_dirs, names)) - - def namelist(self): - names = super(CompleteDirs, self).namelist() - return names + list(self._implied_dirs(names)) - - def _name_set(self): - return set(self.namelist()) - - def resolve_dir(self, name): - """ - If the name represents a directory, return that name - as a directory (with the trailing slash). - """ - names = self._name_set() - dirname = name + '/' - dir_match = name not in names and dirname in names - return dirname if dir_match else name - - @classmethod - def make(cls, source): - """ - Given a source (filename or zipfile), return an - appropriate CompleteDirs subclass. 
- """ - if isinstance(source, CompleteDirs): - return source - - if not isinstance(source, zipfile.ZipFile): - return cls(_pathlib_compat(source)) - - # Only allow for FastLookup when supplied zipfile is read-only - if 'r' not in source.mode: - cls = CompleteDirs - - source.__class__ = cls - return source - - -class FastLookup(CompleteDirs): - """ - ZipFile subclass to ensure implicit - dirs exist and are resolved rapidly. - """ - - def namelist(self): - with contextlib.suppress(AttributeError): - return self.__names - self.__names = super(FastLookup, self).namelist() - return self.__names - - def _name_set(self): - with contextlib.suppress(AttributeError): - return self.__lookup - self.__lookup = super(FastLookup, self)._name_set() - return self.__lookup - - -def _pathlib_compat(path): - """ - For path-like objects, convert to a filename for compatibility - on Python 3.6.1 and earlier. - """ - try: - return path.__fspath__() - except AttributeError: - return str(path) - - -class Path: - """ - A pathlib-compatible interface for zip files. - - Consider a zip file with this structure:: - - . - ├── a.txt - └── b - ├── c.txt - └── d - └── e.txt - - >>> data = io.BytesIO() - >>> zf = zipfile.ZipFile(data, 'w') - >>> zf.writestr('a.txt', 'content of a') - >>> zf.writestr('b/c.txt', 'content of c') - >>> zf.writestr('b/d/e.txt', 'content of e') - >>> zf.filename = 'mem/abcde.zip' - - Path accepts the zipfile object itself or a filename - - >>> root = Path(zf) - - From there, several path operations are available. - - Directory iteration (including the zip file itself): - - >>> a, b = root.iterdir() - >>> a - Path('mem/abcde.zip', 'a.txt') - >>> b - Path('mem/abcde.zip', 'b/') - - name property: - - >>> b.name - 'b' - - join with divide operator: - - >>> c = b / 'c.txt' - >>> c - Path('mem/abcde.zip', 'b/c.txt') - >>> c.name - 'c.txt' - - Read text: - - >>> c.read_text() - 'content of c' - - existence: - - >>> c.exists() - True - >>> (b / 'missing.txt').exists() - False - - Coercion to string: - - >>> import os - >>> str(c).replace(os.sep, posixpath.sep) - 'mem/abcde.zip/b/c.txt' - - At the root, ``name``, ``filename``, and ``parent`` - resolve to the zipfile. Note these attributes are not - valid and will raise a ``ValueError`` if the zipfile - has no filename. - - >>> root.name - 'abcde.zip' - >>> str(root.filename).replace(os.sep, posixpath.sep) - 'mem/abcde.zip' - >>> str(root.parent) - 'mem' - """ - - __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" - - def __init__(self, root, at=""): - """ - Construct a Path from a ZipFile or filename. - - Note: When the source is an existing ZipFile object, - its type (__class__) will be mutated to a - specialized type. If the caller wishes to retain the - original type, the caller should either create a - separate ZipFile object or pass a filename. - """ - self.root = FastLookup.make(root) - self.at = at - - def open(self, mode='r', *args, pwd=None, **kwargs): - """ - Open this entry as text or binary following the semantics - of ``pathlib.Path.open()`` by passing arguments through - to io.TextIOWrapper(). 
- """ - if self.is_dir(): - raise IsADirectoryError(self) - zip_mode = mode[0] - if not self.exists() and zip_mode == 'r': - raise FileNotFoundError(self) - stream = self.root.open(self.at, zip_mode, pwd=pwd) - if 'b' in mode: - if args or kwargs: - raise ValueError("encoding args invalid for binary operation") - return stream - return io.TextIOWrapper(stream, *args, **kwargs) - - @property - def name(self): - return pathlib.Path(self.at).name or self.filename.name - - @property - def suffix(self): - return pathlib.Path(self.at).suffix or self.filename.suffix - - @property - def suffixes(self): - return pathlib.Path(self.at).suffixes or self.filename.suffixes - - @property - def stem(self): - return pathlib.Path(self.at).stem or self.filename.stem - - @property - def filename(self): - return pathlib.Path(self.root.filename).joinpath(self.at) - - def read_text(self, *args, **kwargs): - with self.open('r', *args, **kwargs) as strm: - return strm.read() - - def read_bytes(self): - with self.open('rb') as strm: - return strm.read() - - def _is_child(self, path): - return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") - - def _next(self, at): - return self.__class__(self.root, at) - - def is_dir(self): - return not self.at or self.at.endswith("/") - - def is_file(self): - return self.exists() and not self.is_dir() - - def exists(self): - return self.at in self.root._name_set() - - def iterdir(self): - if not self.is_dir(): - raise ValueError("Can't listdir a file") - subs = map(self._next, self.root.namelist()) - return filter(self._is_child, subs) - - def __str__(self): - return posixpath.join(self.root.filename, self.at) - - def __repr__(self): - return self.__repr.format(self=self) - - def joinpath(self, *other): - next = posixpath.join(self.at, *map(_pathlib_compat, other)) - return self._next(self.root.resolve_dir(next)) - - __truediv__ = joinpath - - @property - def parent(self): - if not self.at: - return self.filename.parent - parent_at = posixpath.dirname(self.at.rstrip('/')) - if parent_at: - parent_at += '/' - return self._next(parent_at) From b7f01ce218b2bc2a13e8b7869ce7c9c7348ef036 Mon Sep 17 00:00:00 2001 From: Oz N Tiram Date: Mon, 8 Aug 2022 10:54:29 +0200 Subject: [PATCH 038/200] Document all environment variables Removed the initialize method. This was masking sphinx autodoc. Also it was directly used inside the __init__ method. Fix #5201 --- pipenv/environments.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/pipenv/environments.py b/pipenv/environments.py index 7f1eb8bfae..7e08876203 100644 --- a/pipenv/environments.py +++ b/pipenv/environments.py @@ -107,19 +107,20 @@ def normalize_pipfile_path(p): class Setting: + """ + Control various settings of pipenv via environment variables. + """ + def __init__(self) -> None: - self.USING_DEFAULT_PYTHON = True - self.initialize() - def initialize(self): + self.USING_DEFAULT_PYTHON = True + """Use the default Python""" + #: Location for Pipenv to store it's package cache. + #: Default is to use appdir's user cache directory. self.PIPENV_CACHE_DIR = os.environ.get( "PIPENV_CACHE_DIR", user_cache_dir("pipenv") ) - """Location for Pipenv to store it's package cache. - - Default is to use appdir's user cache directory. - """ # Tells Pipenv which Python to default to, when none is provided. 
self.PIPENV_DEFAULT_PYTHON_VERSION = os.environ.get(

From da2c98aeda8b6bf35bf768580d324c29c27bff9d Mon Sep 17 00:00:00 2001
From: Oz N Tiram
Date: Mon, 8 Aug 2022 22:54:24 +0200
Subject: [PATCH 039/200] Add news snippet

---
 news/5235.doc.rst | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 news/5235.doc.rst

diff --git a/news/5235.doc.rst b/news/5235.doc.rst
new file mode 100644
index 0000000000..54c3dd93c6
--- /dev/null
+++ b/news/5235.doc.rst
@@ -0,0 +1 @@
+Add documentation for environment variables the configure pipenv.

From eb6b4e102201458b7f54df65a2c7c2adcfa24669 Mon Sep 17 00:00:00 2001
From: Oz N Tiram
Date: Mon, 8 Aug 2022 23:07:01 +0200
Subject: [PATCH 040/200] Replace call to project.s.initialize

---
 pipenv/core.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pipenv/core.py b/pipenv/core.py
index 113ef9e5be..d6c4e8c313 100644
--- a/pipenv/core.py
+++ b/pipenv/core.py
@@ -146,7 +146,8 @@ def load_dot_env(project, as_dict=False, quiet=False):
                 err=True,
             )
         dotenv.load_dotenv(dotenv_file, override=True)
-        project.s.initialize()
+
+        project.s = environments.Setting()

 def cleanup_virtualenv(project, bare=True):

From 777bb4ed9fd56fd2be5767d53094fcc8a4e54070 Mon Sep 17 00:00:00 2001
From: Jeremy Fleischman
Date: Tue, 9 Aug 2022 18:46:57 -0700
Subject: [PATCH 041/200] Fix typo: the latest version of pipenv is 2022.8.5
 (#5238)

---
 CHANGELOG.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 294e5d9d88..289a043fe0 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,5 +1,5 @@
-2022.7.24 (2022-08-05)
-======================
+2022.8.5 (2022-08-05)
+=====================

 Features & Improvements

From 1b2d7efc8e80476dc0361517e491719fdcf51f78 Mon Sep 17 00:00:00 2001
From: Matt Davis
Date: Fri, 12 Aug 2022 20:14:38 -0400
Subject: [PATCH 042/200] Remove code that is not required but may be causing
 the mac OS test failures. (#5241)

---
 pipenv/project.py | 6 ------
 tests/pypi        | 2 +-
 2 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/pipenv/project.py b/pipenv/project.py
index b219907fb4..a78334305b 100644
--- a/pipenv/project.py
+++ b/pipenv/project.py
@@ -35,7 +35,6 @@ from pipenv.utils.shell import (
     find_requirements,
     find_windows_executable,
-    get_pipenv_dist,
     get_workon_home,
     is_virtual_environment,
     looks_like_dir,
@@ -289,11 +288,6 @@ def get_environment(self, allow_global: bool = False) -> Environment:
             pipfile=self.parsed_pipfile,
             project=self,
         )
-        pipenv_dist = get_pipenv_dist(pkg="pipenv")
-        if pipenv_dist:
-            environment.extend_dists(pipenv_dist)
-        else:
-            environment.add_dist("pipenv")
         return environment

     @property
diff --git a/tests/pypi b/tests/pypi
index d374a3bd90..f553001342 160000
--- a/tests/pypi
+++ b/tests/pypi
@@ -1 +1 @@
-Subproject commit d374a3bd90485d30027952236b676cb1fdfaba9a
+Subproject commit f5530013426d6392d67cd1703f379d20a768c1cf

From 6891485a9524d57dae9fb8585e8ee1951b706a67 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C6=B0=C6=A1ng=20Qu=E1%BB=91c=20Kh=C3=A1nh?=
Date: Sat, 13 Aug 2022 18:17:09 +0900
Subject: [PATCH 043/200] Issue 4371 incorrect dependencies when installing dev
 packages (#5234)

* Add test, ensure dev lock uses default packages as constraints.
* Use default packages as constraints when locking develop packages.
* Add test, ensure installing dev-packages uses default packages as constraints. (#4371) (#2987)
* Use default packages as constraints when installing provided dev packages.
* change vistir.path.normalize_path to pipenv.utils.shell.normalize_path
* Add function that gets constraints from packages.
* Add test for get_constraints_from_deps function
* Use get_constraints_from_deps to get constraints
* Use @cached_property instead of @property
* Use standalone utility to write constraints file
* prepare_constraint_file uses precomputed constraints.
* Add news fragment.

---
 news/5234.bugfix.rst                    |  2 +
 pipenv/core.py                          | 24 ++++++-
 pipenv/resolver.py                      |  6 +-
 pipenv/utils/dependencies.py            | 54 +++++++++++++++
 pipenv/utils/resolver.py                | 87 +++++++++++++++++--------
 tests/integration/test_install_basic.py | 26 ++++++++
 tests/integration/test_lock.py          | 35 ++++++++++
 tests/unit/test_utils.py                | 14 ++++
 8 files changed, 217 insertions(+), 31 deletions(-)
 create mode 100644 news/5234.bugfix.rst

diff --git a/news/5234.bugfix.rst b/news/5234.bugfix.rst
new file mode 100644
index 0000000000..83cbad392d
--- /dev/null
+++ b/news/5234.bugfix.rst
@@ -0,0 +1,2 @@
+Use ``packages`` as contraints when locking ``dev-packages`` in Pipfile.
+Use ``packages`` as contraints when installing new ``dev-packages``.
diff --git a/pipenv/core.py b/pipenv/core.py
index d6c4e8c313..22895e72ac 100644
--- a/pipenv/core.py
+++ b/pipenv/core.py
@@ -28,10 +28,12 @@ from pipenv.utils.dependencies import (
     convert_deps_to_pip,
     get_canonical_names,
+    get_constraints_from_deps,
     is_pinned,
     is_required_version,
     is_star,
     pep423_name,
+    prepare_constraint_file,
     python_version,
 )
 from pipenv.utils.indexes import get_source_list, parse_indexes, prepare_pip_source_args
@@ -41,6 +43,7 @@
     cmd_list_to_shell,
     find_python,
     is_python_command,
+    normalize_path,
     project_python,
     subprocess_run,
     system_which,
@@ -789,6 +792,7 @@ def batch_install(
                 trusted_hosts=trusted_hosts,
                 extra_indexes=extra_indexes,
                 use_pep517=use_pep517,
+                use_constraint=False,  # no need to use constraints, it's written in lockfile
             )
             c.dep = dep
@@ -1096,8 +1100,9 @@ def do_lock(
         for k, v in lockfile[section].copy().items():
             if not hasattr(v, "keys"):
                 del lockfile[section][k]
-    # Resolve dev-package dependencies followed by packages dependencies.
-    for is_dev in [True, False]:
+
+    # Resolve package to generate constraints before resolving dev-packages
+    for is_dev in [False, True]:
         pipfile_section = "dev-packages" if is_dev else "packages"
         if project.pipfile_exists:
             packages = project.parsed_pipfile.get(pipfile_section, {})
@@ -1453,12 +1458,14 @@ def pip_install(
     block=True,
     index=None,
     pre=False,
+    dev=False,
     selective_upgrade=False,
     requirements_dir=None,
     extra_indexes=None,
     pypi_mirror=None,
     trusted_hosts=None,
     use_pep517=True,
+    use_constraint=False,
 ):
     piplogger = logging.getLogger("pipenv.patched.pip._internal.commands.install")
     if not trusted_hosts:
@@ -1539,9 +1546,18 @@
     )
     pip_command.extend(pip_args)
     if r:
-        pip_command.extend(["-r", vistir.path.normalize_path(r)])
+        pip_command.extend(["-r", normalize_path(r)])
     elif line:
         pip_command.extend(line)
+    if dev and use_constraint:
+        default_constraints = get_constraints_from_deps(project.packages)
+        constraint_filename = prepare_constraint_file(
+            default_constraints,
+            directory=requirements_dir,
+            sources=None,
+            pip_args=None,
+        )
+        pip_command.extend(["-c", normalize_path(constraint_filename)])
     pip_command.extend(prepare_pip_source_args(sources))
     if project.s.is_verbose():
         click.echo(f"$ {cmd_list_to_shell(pip_command)}", err=True)
@@ -2128,9 +2144,11 @@ def do_install(
                     selective_upgrade=selective_upgrade,
                     no_deps=False,
                     pre=pre,
+                    dev=dev,
                     requirements_dir=requirements_directory,
                     index=index_url,
                     pypi_mirror=pypi_mirror,
+                    use_constraint=True,
                 )
                 if c.returncode:
                     sp.write_err(
diff --git a/pipenv/resolver.py b/pipenv/resolver.py
index 9cce20c18c..2866bee115 100644
--- a/pipenv/resolver.py
+++ b/pipenv/resolver.py
@@ -745,12 +745,15 @@ def resolve_packages(pre, clear, verbose, system, write, requirements_dir, packa
         else None
     )

-    def resolve(packages, pre, project, sources, clear, system, requirements_dir=None):
+    def resolve(
+        packages, pre, project, sources, clear, system, dev, requirements_dir=None
+    ):
         return resolve_deps(
             packages,
             which,
             project=project,
             pre=pre,
+            dev=dev,
             sources=sources,
             clear=clear,
             allow_global=system,
@@ -769,6 +772,7 @@ def resolve(packages, pre, project, sources, clear, system, requirements_dir=Non
         results, resolver = resolve(
             packages,
             pre=pre,
+            dev=dev,
             project=project,
             sources=sources,
             clear=clear,
diff --git a/pipenv/utils/dependencies.py b/pipenv/utils/dependencies.py
index b4ca1a7028..70b4c31531 100644
--- a/pipenv/utils/dependencies.py
+++ b/pipenv/utils/dependencies.py
@@ -280,6 +280,60 @@ def convert_deps_to_pip(
     return f.name

+def get_constraints_from_deps(deps):
+    """Get constraints from Pipfile-formatted dependencies"""
+    from pipenv.vendor.requirementslib.models.requirements import Requirement
+
+    def is_constraint(dep):
+        # https://pip.pypa.io/en/stable/user_guide/#constraints-files
+        # constraints must have a name, they cannot be editable, and they cannot specify extras.
+ return dep.name and not dep.editable and not dep.extras + + constraints = [] + for dep_name, dep in deps.items(): + new_dep = Requirement.from_pipfile(dep_name, dep) + if is_constraint(new_dep): + c = new_dep.as_line().strip() + constraints.append(c) + return constraints + + +def prepare_constraint_file( + constraints, + directory=None, + sources=None, + pip_args=None, +): + from pipenv.vendor.vistir.path import ( + create_tracked_tempdir, + create_tracked_tempfile, + ) + + if not directory: + directory = create_tracked_tempdir(suffix="-requirements", prefix="pipenv-") + + constraints_file = create_tracked_tempfile( + mode="w", + prefix="pipenv-", + suffix="-constraints.txt", + dir=directory, + delete=False, + ) + + if sources and pip_args: + skip_args = ("build-isolation", "use-pep517", "cache-dir") + args_to_add = [ + arg for arg in pip_args if not any(bad_arg in arg for bad_arg in skip_args) + ] + requirementstxt_sources = " ".join(args_to_add) if args_to_add else "" + requirementstxt_sources = requirementstxt_sources.replace(" --", "\n--") + constraints_file.write(f"{requirementstxt_sources}\n") + + constraints_file.write("\n".join([c for c in constraints])) + constraints_file.close() + return constraints_file.name + + def is_required_version(version, specified_version): """Check to see if there's a hard requirement for version number provided in the Pipfile. diff --git a/pipenv/utils/resolver.py b/pipenv/utils/resolver.py index 596358bead..b8349e95dd 100644 --- a/pipenv/utils/resolver.py +++ b/pipenv/utils/resolver.py @@ -17,11 +17,15 @@ from pipenv.patched.pip._internal.operations.build.build_tracker import ( get_build_tracker, ) +from pipenv.patched.pip._internal.req.constructors import ( + install_req_from_parsed_requirement, +) from pipenv.patched.pip._internal.req.req_file import parse_requirements from pipenv.patched.pip._internal.utils.hashes import FAVORITE_HASH from pipenv.patched.pip._internal.utils.temp_dir import global_tempdir_manager from pipenv.project import Project from pipenv.vendor import click +from pipenv.vendor.cached_property import cached_property from pipenv.vendor.requirementslib import Pipfile, Requirement from pipenv.vendor.requirementslib.models.requirements import Line from pipenv.vendor.requirementslib.models.utils import DIRECT_URL_RE @@ -32,9 +36,11 @@ HackedPythonVersion, clean_pkg_version, convert_deps_to_pip, + get_constraints_from_deps, get_vcs_deps, is_pinned_requirement, pep423_name, + prepare_constraint_file, translate_markers, ) from .indexes import parse_indexes, prepare_pip_source_args @@ -117,6 +123,7 @@ def __init__( skipped=None, clear=False, pre=False, + dev=False, ): self.initial_constraints = constraints self.req_dir = req_dir @@ -126,6 +133,7 @@ def __init__( self.hashes = {} self.clear = clear self.pre = pre + self.dev = dev self.results = None self.markers_lookup = markers_lookup if markers_lookup is not None else {} self.index_lookup = index_lookup if index_lookup is not None else {} @@ -420,6 +428,7 @@ def create( req_dir: str = None, clear: bool = False, pre: bool = False, + dev: bool = False, ) -> "Resolver": if not req_dir: @@ -450,6 +459,7 @@ def create( skipped=skipped, clear=clear, pre=pre, + dev=dev, ) @classmethod @@ -522,29 +532,13 @@ def pip_args(self): return self._pip_args def prepare_constraint_file(self): - from pipenv.vendor.vistir.path import create_tracked_tempfile - - constraints_file = create_tracked_tempfile( - mode="w", - prefix="pipenv-", - suffix="-constraints.txt", - dir=self.req_dir, - delete=False, + 
constraint_filename = prepare_constraint_file( + self.initial_constraints, + directory=self.req_dir, + sources=self.sources, + pip_args=self.pip_args, ) - skip_args = ("build-isolation", "use-pep517", "cache-dir") - args_to_add = [ - arg - for arg in self.pip_args - if not any(bad_arg in arg for bad_arg in skip_args) - ] - if self.sources: - requirementstxt_sources = " ".join(args_to_add) if args_to_add else "" - requirementstxt_sources = requirementstxt_sources.replace(" --", "\n--") - constraints_file.write(f"{requirementstxt_sources}\n") - constraints = self.initial_constraints - constraints_file.write("\n".join([c for c in constraints])) - constraints_file.close() - return constraints_file.name + return constraint_filename @property def constraint_file(self): @@ -552,6 +546,17 @@ def constraint_file(self): self._constraint_file = self.prepare_constraint_file() return self._constraint_file + @cached_property + def default_constraint_file(self): + default_constraints = get_constraints_from_deps(self.project.packages) + default_constraint_filename = prepare_constraint_file( + default_constraints, + directory=self.req_dir, + sources=None, + pip_args=None, + ) + return default_constraint_filename + @property def pip_options(self): if self._pip_options is None: @@ -630,12 +635,33 @@ def parsed_constraints(self): ) return self._parsed_constraints - @property - def constraints(self): - from pipenv.patched.pip._internal.req.constructors import ( - install_req_from_parsed_requirement, + @cached_property + def parsed_default_constraints(self): + pip_options = self.pip_options + pip_options.extra_index_urls = [] + parsed_default_constraints = parse_requirements( + self.default_constraint_file, + constraint=True, + finder=self.finder, + session=self.session, + options=pip_options, ) + return parsed_default_constraints + + @cached_property + def default_constraints(self): + default_constraints = [ + install_req_from_parsed_requirement( + c, + isolated=self.pip_options.build_isolation, + user_supplied=False, + ) + for c in self.parsed_default_constraints + ] + return default_constraints + @property + def constraints(self): if self._constraints is None: self._constraints = [ install_req_from_parsed_requirement( @@ -646,6 +672,9 @@ def constraints(self): ) for c in self.parsed_constraints ] + # Only use default_constraints when installing dev-packages + if self.dev: + self._constraints += self.default_constraints return self._constraints @contextlib.contextmanager @@ -870,6 +899,7 @@ def actually_resolve_deps( sources, clear, pre, + dev, req_dir=None, ): if not req_dir: @@ -878,7 +908,7 @@ def actually_resolve_deps( with warnings.catch_warnings(record=True) as warning_list: resolver = Resolver.create( - deps, project, index_lookup, markers_lookup, sources, req_dir, clear, pre + deps, project, index_lookup, markers_lookup, sources, req_dir, clear, pre, dev ) resolver.resolve() hashes = resolver.resolve_hashes() @@ -1064,6 +1094,7 @@ def resolve_deps( python=False, clear=False, pre=False, + dev=False, allow_global=False, req_dir=None, ): @@ -1094,6 +1125,7 @@ def resolve_deps( sources, clear, pre, + dev, req_dir=req_dir, ) except RuntimeError: @@ -1122,6 +1154,7 @@ def resolve_deps( sources, clear, pre, + dev, req_dir=req_dir, ) except RuntimeError: diff --git a/tests/integration/test_install_basic.py b/tests/integration/test_install_basic.py index 91b9d33674..bc3d919a23 100644 --- a/tests/integration/test_install_basic.py +++ b/tests/integration/test_install_basic.py @@ -509,3 +509,29 @@ def 
test_install_with_unnamed_source(PipenvInstance): f.write(contents) c = p.pipenv("install") assert c.returncode == 0 + +@pytest.mark.dev +@pytest.mark.install +def test_install_dev_use_default_constraints(PipenvInstance): + # See https://github.com/pypa/pipenv/issues/4371 + # See https://github.com/pypa/pipenv/issues/2987 + with PipenvInstance(chdir=True) as p: + + c = p.pipenv("install requests==2.14.0") + assert c.returncode == 0 + assert "requests" in p.lockfile["default"] + assert p.lockfile["default"]["requests"]["version"] == "==2.14.0" + + c = p.pipenv("install --dev requests") + assert c.returncode == 0 + assert "requests" in p.lockfile["develop"] + assert p.lockfile["develop"]["requests"]["version"] == "==2.14.0" + + # requests 2.14.0 doesn't require these packages + assert "idna" not in p.lockfile["develop"] + assert "certifi" not in p.lockfile["develop"] + assert "urllib3" not in p.lockfile["develop"] + assert "chardet" not in p.lockfile["develop"] + + c = p.pipenv("run python -c 'import urllib3'") + assert c.returncode != 0 diff --git a/tests/integration/test_lock.py b/tests/integration/test_lock.py index 0208c212bd..28e7f21f43 100644 --- a/tests/integration/test_lock.py +++ b/tests/integration/test_lock.py @@ -791,3 +791,38 @@ def test_pipenv_respects_package_index_restrictions(PipenvInstance): 'sha256:ec22d826a36ed72a7358ff3fe56cbd4ba69dd7a6718ffd450ff0e9df7a47ce6a'], 'index': 'local', 'version': '==2.19.1'} assert p.lockfile['default']['requests'] == expected_result + + +@pytest.mark.dev +@pytest.mark.lock +@pytest.mark.install +def test_dev_lock_use_default_packages_as_constraint(PipenvInstance): + # See https://github.com/pypa/pipenv/issues/4371 + # See https://github.com/pypa/pipenv/issues/2987 + with PipenvInstance(chdir=True) as p: + with open(p.pipfile_path, 'w') as f: + contents = """ +[packages] +requests = "<=2.14.0" + +[dev-packages] +requests = "*" + """.strip() + f.write(contents) + + c = p.pipenv("lock --dev") + assert c.returncode == 0 + assert "requests" in p.lockfile["default"] + assert p.lockfile["default"]["requests"]["version"] == "==2.14.0" + assert "requests" in p.lockfile["develop"] + assert p.lockfile["develop"]["requests"]["version"] == "==2.14.0" + + # requests 2.14.0 doesn't require these packages + assert "idna" not in p.lockfile["develop"] + assert "certifi" not in p.lockfile["develop"] + assert "urllib3" not in p.lockfile["develop"] + assert "chardet" not in p.lockfile["develop"] + + c = p.pipenv("install --dev") + c = p.pipenv("run python -c 'import urllib3'") + assert c.returncode != 0 diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 3aa22dbbb7..76835bdbc8 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -131,6 +131,20 @@ def test_convert_deps_to_pip(deps, expected): def test_convert_deps_to_pip_one_way(deps, expected): assert dependencies.convert_deps_to_pip(deps, r=False) == [expected.lower()] +@pytest.mark.utils +@pytest.mark.parametrize( + "deps, expected", + [ + ({"requests": {}}, ["requests"]), + ({"FooProject": {"path": ".", "editable" : "true"}}, []), + ({"FooProject": {"version": "==1.2"}}, ["fooproject==1.2"]), + ({"requests": {"extras": ["security"]}}, []), + ({"requests": {"extras": []}}, ["requests"]), + ({"extras" : {}}, ["extras"]), + ], +) +def test_get_constraints_from_deps(deps, expected): + assert dependencies.get_constraints_from_deps(deps) == expected @pytest.mark.parametrize("line,result", [ ("-i https://example.com/simple/", 
("https://example.com/simple/", None, None, [])), From 8ba5d34079709d3e7d42368c37cfeca3a89990af Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 13 Aug 2022 08:41:29 -0400 Subject: [PATCH 044/200] Update pypi release action. --- .github/workflows/pypi_upload.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml index 7ffbebcead..acb2b16b7f 100644 --- a/.github/workflows/pypi_upload.yml +++ b/.github/workflows/pypi_upload.yml @@ -47,7 +47,7 @@ jobs: # to upload to test pypi, pass repository_url: https://test.pypi.org/legacy/ and use secrets.TEST_PYPI_TOKEN - name: Publish a Python distribution to PyPI - uses: pypa/gh-action-pypi-publish@master + uses: pypa/gh-action-pypi-publish@release/v1 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} From fc6857dbcf52de605ab494b20495338edfd79006 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 13 Aug 2022 08:41:58 -0400 Subject: [PATCH 045/200] Release v2022.8.13 --- CHANGELOG.rst | 29 +++++ news/5128.bugfix.rst | 1 - news/5200.removal.rst | 1 - news/5214.bugfix.rst | 1 - news/5226.trivial.rst | 1 - news/5229.bugfix.rst | 1 - news/5230.vendor.rst | 1 - news/5234.bugfix.rst | 2 - news/5235.doc.rst | 1 - pipenv/__version__.py | 2 +- pipenv/pipenv.1 | 264 ++++++++++++++++++++++++++++++++++++++++-- 11 files changed, 287 insertions(+), 17 deletions(-) delete mode 100644 news/5128.bugfix.rst delete mode 100644 news/5200.removal.rst delete mode 100644 news/5214.bugfix.rst delete mode 100644 news/5226.trivial.rst delete mode 100644 news/5229.bugfix.rst delete mode 100644 news/5230.vendor.rst delete mode 100644 news/5234.bugfix.rst delete mode 100644 news/5235.doc.rst diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 289a043fe0..77e43aa148 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,32 @@ +2022.8.13 (2022-08-13) +====================== + + +Bug Fixes +--------- + +- If environment variable ``CI`` or ``TF_BUILD`` is set but does not evaluate to ``False`` it is now treated as ``True``. `#5128 `_ +- Fix auto-complete crashing on 'install' and 'uninstall' keywords `#5214 `_ +- Address remaining ``pipenv`` commands that were still referencing the user or system installed ``pip`` to use the vendored ``pip`` internal to ``pipenv``. `#5229 `_ +- Use ``packages`` as contraints when locking ``dev-packages`` in Pipfile. + Use ``packages`` as contraints when installing new ``dev-packages``. `#5234 `_ + +Vendored Libraries +------------------ + +- Vendor in minor ``pip`` update ``22.2.2`` `#5230 `_ + +Improved Documentation +---------------------- + +- Add documentation for environment variables the configure pipenv. `#5235 `_ + +Removals and Deprecations +------------------------- + +- The deprecated way of generating requirements ``install -r`` or ``lock -r`` has been removed in favor of the ``pipenv requirements`` command. `#5200 `_ + + 2022.8.5 (2022-08-05) ===================== diff --git a/news/5128.bugfix.rst b/news/5128.bugfix.rst deleted file mode 100644 index 120ce82e56..0000000000 --- a/news/5128.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -If environment variable ``CI`` or ``TF_BUILD`` is set but does not evaluate to ``False`` it is now treated as ``True``. 
diff --git a/news/5200.removal.rst b/news/5200.removal.rst deleted file mode 100644 index 848ef1c05c..0000000000 --- a/news/5200.removal.rst +++ /dev/null @@ -1 +0,0 @@ -The deprecated way of generating requirements ``install -r`` or ``lock -r`` has been removed in favor of the ``pipenv requirements`` command. diff --git a/news/5214.bugfix.rst b/news/5214.bugfix.rst deleted file mode 100644 index 601dc83564..0000000000 --- a/news/5214.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fix auto-complete crashing on 'install' and 'uninstall' keywords diff --git a/news/5226.trivial.rst b/news/5226.trivial.rst deleted file mode 100644 index 8937f4dcfa..0000000000 --- a/news/5226.trivial.rst +++ /dev/null @@ -1 +0,0 @@ -Modernize the test ``test_convert_deps_to_pip`` to not use ``pip-shims`` and the code it calls to not use ``vistir``. diff --git a/news/5229.bugfix.rst b/news/5229.bugfix.rst deleted file mode 100644 index bb8bcdaae7..0000000000 --- a/news/5229.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Address remaining ``pipenv`` commands that were still referencing the user or system installed ``pip`` to use the vendored ``pip`` internal to ``pipenv``. diff --git a/news/5230.vendor.rst b/news/5230.vendor.rst deleted file mode 100644 index 6994c6b117..0000000000 --- a/news/5230.vendor.rst +++ /dev/null @@ -1 +0,0 @@ -Vendor in minor ``pip`` update ``22.2.2`` diff --git a/news/5234.bugfix.rst b/news/5234.bugfix.rst deleted file mode 100644 index 83cbad392d..0000000000 --- a/news/5234.bugfix.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use ``packages`` as contraints when locking ``dev-packages`` in Pipfile. -Use ``packages`` as contraints when installing new ``dev-packages``. diff --git a/news/5235.doc.rst b/news/5235.doc.rst deleted file mode 100644 index 54c3dd93c6..0000000000 --- a/news/5235.doc.rst +++ /dev/null @@ -1 +0,0 @@ -Add documentation for environment variables the configure pipenv. diff --git a/pipenv/__version__.py b/pipenv/__version__.py index 0e1278e604..27fa5083b4 100644 --- a/pipenv/__version__.py +++ b/pipenv/__version__.py @@ -2,4 +2,4 @@ # // ) ) / / // ) ) //___) ) // ) ) || / / # //___/ / / / //___/ / // // / / || / / # // / / // ((____ // / / ||/ / -__version__ = "2022.8.5.dev" +__version__ = "2022.8.13" diff --git a/pipenv/pipenv.1 b/pipenv/pipenv.1 index 7fe1b40623..885b5a3f52 100644 --- a/pipenv/pipenv.1 +++ b/pipenv/pipenv.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "PIPENV" "1" "Aug 05, 2022" "2022.8.5" "pipenv" +.TH "PIPENV" "1" "Aug 13, 2022" "2022.8.13" "pipenv" .SH NAME pipenv \- pipenv Documentation \fI\%\fP\fI\%\fP\fI\%\fP @@ -453,7 +453,35 @@ You might want to set \fBexport PIPENV_VENV_IN_PROJECT=1\fP in your .bashrc/.zsh .sp Congratulations, you now know how to install and use Python packages! ✨ 🍰 ✨ .SS Release and Version History -.SS 2022.7.24 (2022\-08\-05) +.SS 2022.8.13 (2022\-08\-13) +.SS Bug Fixes +.INDENT 0.0 +.IP \(bu 2 +If environment variable \fBCI\fP or \fBTF_BUILD\fP is set but does not evaluate to \fBFalse\fP it is now treated as \fBTrue\fP\&. \fI\%#5128\fP +.IP \(bu 2 +Fix auto\-complete crashing on \(aqinstall\(aq and \(aquninstall\(aq keywords \fI\%#5214\fP +.IP \(bu 2 +Address remaining \fBpipenv\fP commands that were still referencing the user or system installed \fBpip\fP to use the vendored \fBpip\fP internal to \fBpipenv\fP\&. 
\fI\%#5229\fP +.IP \(bu 2 +Use \fBpackages\fP as contraints when locking \fBdev\-packages\fP in Pipfile. +Use \fBpackages\fP as contraints when installing new \fBdev\-packages\fP\&. \fI\%#5234\fP +.UNINDENT +.SS Vendored Libraries +.INDENT 0.0 +.IP \(bu 2 +Vendor in minor \fBpip\fP update \fB22.2.2\fP \fI\%#5230\fP +.UNINDENT +.SS Improved Documentation +.INDENT 0.0 +.IP \(bu 2 +Add documentation for environment variables the configure pipenv. \fI\%#5235\fP +.UNINDENT +.SS Removals and Deprecations +.INDENT 0.0 +.IP \(bu 2 +The deprecated way of generating requirements \fBinstall \-r\fP or \fBlock \-r\fP has been removed in favor of the \fBpipenv requirements\fP command. \fI\%#5200\fP +.UNINDENT +.SS 2022.8.5 (2022\-08\-05) .SS Features & Improvements .INDENT 0.0 .IP \(bu 2 @@ -1344,28 +1372,28 @@ Update vendored dependencies and invocations .INDENT 2.0 .IP \(bu 2 Update vendored and patched dependencies -\- Update patches on \fBpiptools\fP, \fBpip\fP, \fBpip\-shims\fP, +\- Update patches on \fBpiptools\fP, \fBpip\fP, \fBpip\-shims\fP, .nf \(ga\(ga .fi tomlkit\(ga .IP \(bu 2 Fix invocations of dependencies -\- Fix custom +\- Fix custom .nf \(ga\(ga .fi InstallCommand\(ga instantiation -\- Update +\- Update .nf \(ga\(ga .fi PackageFinder\(ga usage -\- Fix +\- Fix .nf \(ga\(ga .fi -Bool\(ga stringify attempts from +Bool\(ga stringify attempts from .nf \(ga\(ga .fi @@ -3805,6 +3833,228 @@ Default is to show emojis. This is automatically set on Windows. .UNINDENT .INDENT 0.0 .TP +.B class pipenv.environments.Setting +Control various settings of pipenv via environment variables. +.INDENT 7.0 +.TP +.B PIPENV_CACHE_DIR +Location for Pipenv to store it\(aqs package cache. +Default is to use appdir\(aqs user cache directory. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_CUSTOM_VENV_NAME +Tells Pipenv whether to name the venv something other than the default dir name. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_DEFAULT_PYTHON_VERSION +Use this Python version when creating new virtual environments by default. +.sp +This can be set to a version string, e.g. \fB3.9\fP, or a path. Default is to use +whatever Python Pipenv is installed under (i.e. \fBsys.executable\fP). Command +line flags (e.g. \fB\-\-python\fP and \fB\-\-three\fP) are prioritized over +this configuration. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_DONT_LOAD_ENV +If set, Pipenv does not load the \fB\&.env\fP file. +.sp +Default is to load \fB\&.env\fP for \fBrun\fP and \fBshell\fP commands. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_DONT_USE_ASDF +If set, Pipenv does not attempt to install Python with asdf. +.sp +Default is to install Python automatically via asdf when needed, if possible. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_DONT_USE_PYENV +If set, Pipenv does not attempt to install Python with pyenv. +.sp +Default is to install Python automatically via pyenv when needed, if possible. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_DOTENV_LOCATION +If set, Pipenv loads the \fB\&.env\fP file at the specified location. +.sp +Default is to load \fB\&.env\fP from the project root, if found. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_EMULATOR +If set, the terminal emulator\(aqs name for \fBpipenv shell\fP to use. +.sp +Default is to detect emulators automatically. This should be set if your +emulator, e.g. Cmder, cannot be detected correctly. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_IGNORE_VIRTUALENVS +If set, Pipenv will always assign a virtual environment for this project. 
+.sp +By default, Pipenv tries to detect whether it is run inside a virtual +environment, and reuses it if possible. This is usually the desired behavior, +and enables the user to use any user\-built environments with Pipenv. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_INSTALL_TIMEOUT +Max number of seconds to wait for package installation. +.sp +Defaults to 900 (15 minutes), a very long arbitrary time. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_MAX_DEPTH +Maximum number of directories to recursively search for a Pipfile. +.sp +Default is 3. See also \fBPIPENV_NO_INHERIT\fP\&. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_MAX_RETRIES +Specify how many retries Pipenv should attempt for network requests. +.sp +Default is 0. Automatically set to 1 on CI environments for robust testing. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_MAX_SUBPROCESS +How many subprocesses should Pipenv use when installing. +.sp +Default is 16, an arbitrary number that seems to work. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_NOSPIN +If set, disable terminal spinner. +.sp +This can make the logs cleaner. Automatically set on Windows, and in CI +environments. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_NO_INHERIT +Tell Pipenv not to inherit parent directories. +.sp +This is useful for deployment to avoid using the wrong current directory. +Overwrites \fBPIPENV_MAX_DEPTH\fP\&. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_PIPFILE +If set, this specifies a custom Pipfile location. +.sp +When running pipenv from a location other than the same directory where the +Pipfile is located, instruct pipenv to find the Pipfile in the location +specified by this environment variable. +.sp +Default is to find Pipfile automatically in the current and parent directories. +See also \fBPIPENV_MAX_DEPTH\fP\&. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_PYPI_MIRROR +If set, tells pipenv to override PyPI index urls with a mirror. +.sp +Default is to not mirror PyPI, i.e. use the real one, pypi.org. The +\fB\-\-pypi\-mirror\fP command line flag overwrites this. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_QUIET +If set, makes Pipenv quieter. +.sp +Default is unset, for normal verbosity. \fBPIPENV_VERBOSE\fP overrides this. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_RESOLVE_VCS +Tells Pipenv whether to resolve all VCS dependencies in full. +.sp +As of Pipenv 2018.11.26, only editable VCS dependencies were resolved in full. +To retain this behavior and avoid handling any conflicts that arise from the new +approach, you may set this to \(aq0\(aq, \(aqoff\(aq, or \(aqfalse\(aq. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_SHELL_EXPLICIT +An absolute path to the preferred shell for \fBpipenv shell\fP\&. +.sp +Default is to detect automatically what shell is currently in use. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_SHELL_FANCY +If set, always use fancy mode when invoking \fBpipenv shell\fP\&. +.sp +Default is to use the compatibility shell if possible. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_SKIP_LOCK +If set, Pipenv won\(aqt lock dependencies automatically. +.sp +This might be desirable if a project has large number of dependencies, +because locking is an inherently slow operation. +.sp +Default is to lock dependencies and update \fBPipfile.lock\fP on each run. +.sp +Usage: \fIexport PIPENV_SKIP_LOCK=true\fP OR \fIexport PIPENV_SKIP_LOCK=1\fP to skip automatic locking +.sp +NOTE: This only affects the \fBinstall\fP and \fBuninstall\fP commands. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_SPINNER +Sets the default spinner type. 
+.sp +Spinners are identical to the \fBnode.js\fP spinners and can be found at +\fI\%https://github.com/sindresorhus/cli\-spinners\fP +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_TIMEOUT +Max number of seconds Pipenv will wait for virtualenv creation to complete. +.sp +Default is 120 seconds, an arbitrary number that seems to work. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_VERBOSE +If set, makes Pipenv more wordy. +.sp +Default is unset, for normal verbosity. This takes precedence over +\fBPIPENV_QUIET\fP\&. +.UNINDENT +.INDENT 7.0 +.TP +.B PIPENV_YES +If set, Pipenv automatically assumes "yes" at all prompts. +.sp +Default is to prompt the user for an answer if the current command line session +if interactive. +.UNINDENT +.INDENT 7.0 +.TP +.B PIP_EXISTS_ACTION +Specifies the value for pip\(aqs \-\-exists\-action option +.sp +Defaults to \fB(w)ipe\fP +.UNINDENT +.INDENT 7.0 +.TP +.B USING_DEFAULT_PYTHON +Use the default Python +.UNINDENT +.UNINDENT +.INDENT 0.0 +.TP .B pipenv.environments.get_from_env(arg, prefix=\(aqPIPENV\(aq, check_for_negation=True) Check the environment for a variable, returning its truthy or stringified value .sp From 765194e4a28569296bd036e8582365332e1dce37 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 13 Aug 2022 12:45:48 +0000 Subject: [PATCH 046/200] Bumped version. Signed-off-by: github-actions[bot] --- pipenv/__version__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipenv/__version__.py b/pipenv/__version__.py index 27fa5083b4..e0360c2108 100644 --- a/pipenv/__version__.py +++ b/pipenv/__version__.py @@ -2,4 +2,4 @@ # // ) ) / / // ) ) //___) ) // ) ) || / / # //___/ / / / //___/ / // // / / || / / # // / / // ((____ // / / ||/ / -__version__ = "2022.8.13" +__version__ = "2022.8.14.dev0" From 1b43fc8399c32e363aff71ff1d3136d3cd66345c Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 13 Aug 2022 08:46:41 -0400 Subject: [PATCH 047/200] Mark these tests as flaky. 
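
For context, the bare `@flaky` decorator applied in this patch reruns a
failing test once before reporting failure (the `flaky` package defaults to
`max_runs=2`, `min_passes=1`). A minimal sketch of the tunable form, using a
hypothetical test name rather than anything in this change:

    from flaky import flaky

    @flaky(max_runs=3, min_passes=1)  # allow up to three runs; one pass suffices
    def test_network_dependent_lookup():
        ...

The conversion tests marked below can hit the network during resolution, so
an occasional automatic rerun is preferable to a spurious CI failure.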
--- tests/unit/test_utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 76835bdbc8..27a7806700 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -1,6 +1,7 @@ import os from unittest import mock +from flaky import flaky import pytest import pipenv.utils.shell @@ -81,6 +82,7 @@ def mock_unpack(link, source_dir, download_dir, only_download=False, session=Non return +@flaky @pytest.mark.utils @pytest.mark.parametrize("deps, expected", DEP_PIP_PAIRS) @pytest.mark.needs_internet @@ -90,6 +92,7 @@ def test_convert_deps_to_pip(deps, expected): assert dependencies.convert_deps_to_pip(deps, r=False) == [expected] +@flaky @pytest.mark.utils @pytest.mark.parametrize( "deps, expected", From 4e0c1be15512e501fc7bf126626e14e0599ab14d Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 13 Aug 2022 08:50:36 -0400 Subject: [PATCH 048/200] Fix linter --- pipenv/pipenv.1 | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pipenv/pipenv.1 b/pipenv/pipenv.1 index 885b5a3f52..cedbd1bbd7 100644 --- a/pipenv/pipenv.1 +++ b/pipenv/pipenv.1 @@ -1372,28 +1372,28 @@ Update vendored dependencies and invocations .INDENT 2.0 .IP \(bu 2 Update vendored and patched dependencies -\- Update patches on \fBpiptools\fP, \fBpip\fP, \fBpip\-shims\fP, +\- Update patches on \fBpiptools\fP, \fBpip\fP, \fBpip\-shims\fP, .nf \(ga\(ga .fi tomlkit\(ga .IP \(bu 2 Fix invocations of dependencies -\- Fix custom +\- Fix custom .nf \(ga\(ga .fi InstallCommand\(ga instantiation -\- Update +\- Update .nf \(ga\(ga .fi PackageFinder\(ga usage -\- Fix +\- Fix .nf \(ga\(ga .fi -Bool\(ga stringify attempts from +Bool\(ga stringify attempts from .nf \(ga\(ga .fi From ffe4adcd57b4543a0ece700830a3a5582b46aa78 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 13 Aug 2022 16:24:13 +0200 Subject: [PATCH 049/200] Upgrade GitHub Actions (#5244) * Upgrade GitHub Actions --- .github/workflows/ci.yaml | 22 +++++++++++----------- .github/workflows/pypi_upload.yml | 4 ++-- news/5244.trivial.rst | 1 + 3 files changed, 14 insertions(+), 13 deletions(-) create mode 100644 news/5244.trivial.rst diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 48fcfacae0..0f0ee18b21 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -42,10 +42,10 @@ jobs: PYTHONIOENCODING: "utf-8" GIT_ASK_YESNO: "false" steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 with: - python-version: 3.9 + python-version: 3.x - run: | python -m pip install pre-commit pre-commit run --all-files --verbose --show-diff-on-failure @@ -54,10 +54,10 @@ jobs: needs: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 with: - python-version: 3.9 + python-version: 3.x - run: | python -m pip install --upgrade wheel invoke parver bs4 vistir towncrier requests python -m invoke vendoring.update @@ -72,10 +72,10 @@ jobs: os: [MacOS, Ubuntu, Windows] steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} @@ -115,10 +115,10 @@ jobs: needs: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: 
actions/setup-python@v4 with: - python-version: 3.9 + python-version: 3.x - run: pip install -U build twine - run: | python -m build diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml index acb2b16b7f..c5ee31c546 100644 --- a/.github/workflows/pypi_upload.yml +++ b/.github/workflows/pypi_upload.yml @@ -15,7 +15,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Create Release id: create_release @@ -29,7 +29,7 @@ jobs: prerelease: false - name: Set up Python 3.9 - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: 3.9 diff --git a/news/5244.trivial.rst b/news/5244.trivial.rst new file mode 100644 index 0000000000..8547e49845 --- /dev/null +++ b/news/5244.trivial.rst @@ -0,0 +1 @@ +Upgrade GitHub Actions From e0d753cae5db342e70a9c9bb79401dae6d75a0c8 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 13 Aug 2022 13:23:30 -0400 Subject: [PATCH 050/200] Don't count packaging as a bad package to omit. --- pipenv/core.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pipenv/core.py b/pipenv/core.py index 22895e72ac..f995cda003 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -63,7 +63,6 @@ # Packages that should be ignored later. BAD_PACKAGES = ( "distribute", - "packaging", "pip", "pkg-resources", "setuptools", From 2f4d5a39e75224506a6967f88a8684a6a2cbb27d Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 13 Aug 2022 13:43:26 -0400 Subject: [PATCH 051/200] Add unit test for this. --- tests/integration/test_install_basic.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/integration/test_install_basic.py b/tests/integration/test_install_basic.py index bc3d919a23..f0741f15f2 100644 --- a/tests/integration/test_install_basic.py +++ b/tests/integration/test_install_basic.py @@ -535,3 +535,16 @@ def test_install_dev_use_default_constraints(PipenvInstance): c = p.pipenv("run python -c 'import urllib3'") assert c.returncode != 0 + + +@pytest.mark.dev +@pytest.mark.basic +@pytest.mark.install +@pytest.mark.needs_internet +def test_install_does_not_exclude_packaging(PipenvInstance): + """Ensure that running `pipenv install` doesn't exclude packaging when it's required. """ + with PipenvInstance(chdir=True) as p: + c = p.pipenv("install dataclasses-json") + assert c.returncode == 0 + c = p.pipenv("run python -c 'from dataclasses_json import DataClassJsonMixin'") + assert c.returncode == 0 From e11d89013bedc60ac5c201831b87f895d809f0d0 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sat, 13 Aug 2022 13:45:49 -0400 Subject: [PATCH 052/200] Add news fragment. --- news/5247.bugfix.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 news/5247.bugfix.rst diff --git a/news/5247.bugfix.rst b/news/5247.bugfix.rst new file mode 100644 index 0000000000..1bef0b9d8a --- /dev/null +++ b/news/5247.bugfix.rst @@ -0,0 +1 @@ +Removed ``packaging`` library from ``BAD_PACKAGES`` constant to allow it to be installed, which fixes regression from ``pipenv==2022.8.13``. From 2236ee2af14e2e73c57313c59edd63d028cd8576 Mon Sep 17 00:00:00 2001 From: Oz N Tiram Date: Sun, 14 Aug 2022 05:04:25 +0200 Subject: [PATCH 053/200] Remove vendored cached_property (#5249) The class cached_property is available in the Python standard library from version 3.8 onwards. In addition, it is redefined in included libraries, so we can just use it from there.
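The replacement pattern, as the hunks below apply it in environment.py, project.py, and resolver.py, prefers the standard-library implementation and falls back to the copy vendored inside pip's distlib on Python 3.7. Extracted here as a self-contained sketch using the same import paths as the patch (the fallback path only resolves inside pipenv's vendored tree):

    try:
        # functools.cached_property exists only on Python 3.8 and later
        from functools import cached_property
    except ImportError:
        # distlib ships an equivalent implementation for older interpreters
        from pipenv.patched.pip._vendor.distlib.util import cached_property

    class Example:
        @cached_property
        def expensive(self):
            # computed once, then stored on the instance as a plain attribute
            return sum(range(10**6))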
--- pipenv/environment.py | 9 +- pipenv/project.py | 9 +- pipenv/utils/resolver.py | 8 +- pipenv/vendor/cached-property.LICENSE | 12 -- pipenv/vendor/cached_property.LICENSE | 12 -- pipenv/vendor/cached_property.py | 153 ------------------ pipenv/vendor/pythonfinder/models/path.py | 2 +- .../requirementslib/models/requirements.py | 2 +- pipenv/vendor/vendor.txt | 1 - tasks/vendoring/__init__.py | 4 + 10 files changed, 29 insertions(+), 183 deletions(-) delete mode 100644 pipenv/vendor/cached-property.LICENSE delete mode 100644 pipenv/vendor/cached_property.LICENSE delete mode 100644 pipenv/vendor/cached_property.py diff --git a/pipenv/environment.py b/pipenv/environment.py index 36016bbb85..f911eab8cf 100644 --- a/pipenv/environment.py +++ b/pipenv/environment.py @@ -22,7 +22,14 @@ from pipenv.utils.processes import subprocess_run from pipenv.utils.shell import make_posix, normalize_path from pipenv.vendor import click, vistir -from pipenv.vendor.cached_property import cached_property + +try: + # this is only in Python3.8 and later + from functools import cached_property +except ImportError: + # eventually distlib will remove cached property when they drop Python3.7 + from pipenv.patched.pip._vendor.distlib.util import cached_property + if is_type_checking(): from types import ModuleType diff --git a/pipenv/project.py b/pipenv/project.py index a78334305b..9823cae70e 100644 --- a/pipenv/project.py +++ b/pipenv/project.py @@ -42,9 +42,16 @@ system_which, ) from pipenv.utils.toml import cleanup_toml, convert_toml_outline_tables -from pipenv.vendor.cached_property import cached_property from pipenv.vendor.requirementslib.models.utils import get_default_pyproject_backend +try: + # this is only in Python3.8 and later + from functools import cached_property +except ImportError: + # eventually distlib will remove cached property when they drop Python3.7 + from pipenv.patched.pip._vendor.distlib.util import cached_property + + if is_type_checking(): from typing import Dict, List, Optional, Set, Text, Tuple, Union diff --git a/pipenv/utils/resolver.py b/pipenv/utils/resolver.py index b8349e95dd..f88eaff340 100644 --- a/pipenv/utils/resolver.py +++ b/pipenv/utils/resolver.py @@ -25,13 +25,19 @@ from pipenv.patched.pip._internal.utils.temp_dir import global_tempdir_manager from pipenv.project import Project from pipenv.vendor import click -from pipenv.vendor.cached_property import cached_property from pipenv.vendor.requirementslib import Pipfile, Requirement from pipenv.vendor.requirementslib.models.requirements import Line from pipenv.vendor.requirementslib.models.utils import DIRECT_URL_RE from pipenv.vendor.vistir import TemporaryDirectory, open_file from pipenv.vendor.vistir.path import create_tracked_tempdir +try: + # this is only in Python3.8 and later + from functools import cached_property +except ImportError: + # eventually distlib will remove cached property when they drop Python3.7 + from pipenv.patched.pip._vendor.distlib.util import cached_property + from .dependencies import ( HackedPythonVersion, clean_pkg_version, diff --git a/pipenv/vendor/cached-property.LICENSE b/pipenv/vendor/cached-property.LICENSE deleted file mode 100644 index a181761ca5..0000000000 --- a/pipenv/vendor/cached-property.LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2015, Daniel Greenfeld -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of cached-property nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pipenv/vendor/cached_property.LICENSE b/pipenv/vendor/cached_property.LICENSE deleted file mode 100644 index a181761ca5..0000000000 --- a/pipenv/vendor/cached_property.LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2015, Daniel Greenfeld -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of cached-property nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
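Note that the module deleted below offered more than the plain decorator — it also shipped threaded and TTL variants that have no functools equivalent — but pipenv only ever imported the plain cached_property, which is why the removal is safe. For illustration, the TTL form being dropped was used like this (hypothetical usage of the deleted API, not code from this patch):

    from cached_property import cached_property_with_ttl  # the module removed below

    class Feed:
        @cached_property_with_ttl(ttl=5)  # cached value expires and is recomputed after 5 seconds
        def latest(self):
            ...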
diff --git a/pipenv/vendor/cached_property.py b/pipenv/vendor/cached_property.py deleted file mode 100644 index 3135871bfb..0000000000 --- a/pipenv/vendor/cached_property.py +++ /dev/null @@ -1,153 +0,0 @@ -# -*- coding: utf-8 -*- - -__author__ = "Daniel Greenfeld" -__email__ = "pydanny@gmail.com" -__version__ = "1.5.2" -__license__ = "BSD" - -from functools import wraps -from time import time -import threading - -try: - import asyncio -except (ImportError, SyntaxError): - asyncio = None - - -class cached_property(object): - """ - A property that is only computed once per instance and then replaces itself - with an ordinary attribute. Deleting the attribute resets the property. - Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76 - """ # noqa - - def __init__(self, func): - self.__doc__ = getattr(func, "__doc__") - self.func = func - - def __get__(self, obj, cls): - if obj is None: - return self - - if asyncio and asyncio.iscoroutinefunction(self.func): - return self._wrap_in_coroutine(obj) - - value = obj.__dict__[self.func.__name__] = self.func(obj) - return value - - def _wrap_in_coroutine(self, obj): - @wraps(obj) - @asyncio.coroutine - def wrapper(): - future = asyncio.ensure_future(self.func(obj)) - obj.__dict__[self.func.__name__] = future - return future - - return wrapper() - - -class threaded_cached_property(object): - """ - A cached_property version for use in environments where multiple threads - might concurrently try to access the property. - """ - - def __init__(self, func): - self.__doc__ = getattr(func, "__doc__") - self.func = func - self.lock = threading.RLock() - - def __get__(self, obj, cls): - if obj is None: - return self - - obj_dict = obj.__dict__ - name = self.func.__name__ - with self.lock: - try: - # check if the value was computed before the lock was acquired - return obj_dict[name] - - except KeyError: - # if not, do the calculation and release the lock - return obj_dict.setdefault(name, self.func(obj)) - - -class cached_property_with_ttl(object): - """ - A property that is only computed once per instance and then replaces itself - with an ordinary attribute. Setting the ttl to a number expresses how long - the property will last before being timed out. - """ - - def __init__(self, ttl=None): - if callable(ttl): - func = ttl - ttl = None - else: - func = None - self.ttl = ttl - self._prepare_func(func) - - def __call__(self, func): - self._prepare_func(func) - return self - - def __get__(self, obj, cls): - if obj is None: - return self - - now = time() - obj_dict = obj.__dict__ - name = self.__name__ - try: - value, last_updated = obj_dict[name] - except KeyError: - pass - else: - ttl_expired = self.ttl and self.ttl < now - last_updated - if not ttl_expired: - return value - - value = self.func(obj) - obj_dict[name] = (value, now) - return value - - def __delete__(self, obj): - obj.__dict__.pop(self.__name__, None) - - def __set__(self, obj, value): - obj.__dict__[self.__name__] = (value, time()) - - def _prepare_func(self, func): - self.func = func - if func: - self.__doc__ = func.__doc__ - self.__name__ = func.__name__ - self.__module__ = func.__module__ - - -# Aliases to make cached_property_with_ttl easier to use -cached_property_ttl = cached_property_with_ttl -timed_cached_property = cached_property_with_ttl - - -class threaded_cached_property_with_ttl(cached_property_with_ttl): - """ - A cached_property version for use in environments where multiple threads - might concurrently try to access the property. 
- """ - - def __init__(self, ttl=None): - super(threaded_cached_property_with_ttl, self).__init__(ttl) - self.lock = threading.RLock() - - def __get__(self, obj, cls): - with self.lock: - return super(threaded_cached_property_with_ttl, self).__get__(obj, cls) - - -# Alias to make threaded_cached_property_with_ttl easier to use -threaded_cached_property_ttl = threaded_cached_property_with_ttl -timed_threaded_cached_property = threaded_cached_property_with_ttl diff --git a/pipenv/vendor/pythonfinder/models/path.py b/pipenv/vendor/pythonfinder/models/path.py index 76bb50ab58..01ff67ddbe 100644 --- a/pipenv/vendor/pythonfinder/models/path.py +++ b/pipenv/vendor/pythonfinder/models/path.py @@ -11,7 +11,7 @@ import pipenv.vendor.attr as attr import pipenv.vendor.six as six -from pipenv.vendor.cached_property import cached_property +from pipenv.vendor.pyparsing.core import cached_property from ..compat import Path, fs_str from ..environment import ( diff --git a/pipenv/vendor/requirementslib/models/requirements.py b/pipenv/vendor/requirementslib/models/requirements.py index 8d717f4ce1..0a1b5253a7 100644 --- a/pipenv/vendor/requirementslib/models/requirements.py +++ b/pipenv/vendor/requirementslib/models/requirements.py @@ -15,7 +15,7 @@ import pipenv.vendor.attr as attr import pipenv.vendor.pip_shims as pip_shims -from pipenv.vendor.cached_property import cached_property +from pipenv.vendor.pyparsing.core import cached_property from pipenv.patched.pip._vendor.packaging.markers import Marker from pipenv.patched.pip._vendor.packaging.requirements import Requirement as PackagingRequirement from pipenv.patched.pip._vendor.packaging.specifiers import ( diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index a0b4812ec6..4851b23172 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -1,6 +1,5 @@ appdirs==1.4.4 attrs==21.2.0 -cached-property==1.5.2 cerberus==1.3.4 click-didyoumean==0.0.3 click==8.0.3 diff --git a/tasks/vendoring/__init__.py b/tasks/vendoring/__init__.py index d93ac782fc..5175063510 100644 --- a/tasks/vendoring/__init__.py +++ b/tasks/vendoring/__init__.py @@ -76,6 +76,10 @@ r"(? Date: Sun, 14 Aug 2022 00:03:38 -0400 Subject: [PATCH 054/200] Release v2022.8.14 --- CHANGELOG.rst | 10 ++++++++++ news/5244.trivial.rst | 1 - news/5247.bugfix.rst | 1 - pipenv/__version__.py | 2 +- pipenv/pipenv.1 | 18 ++++++++++++------ 5 files changed, 23 insertions(+), 9 deletions(-) delete mode 100644 news/5244.trivial.rst delete mode 100644 news/5247.bugfix.rst diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 77e43aa148..2592a191a2 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,13 @@ +2022.8.14 (2022-08-14) +====================== + + +Bug Fixes +--------- + +- Removed ``packaging`` library from ``BAD_PACKAGES`` constant to allow it to be installed, which fixes regression from ``pipenv==2022.8.13``. `#5247 `_ + + 2022.8.13 (2022-08-13) ====================== diff --git a/news/5244.trivial.rst b/news/5244.trivial.rst deleted file mode 100644 index 8547e49845..0000000000 --- a/news/5244.trivial.rst +++ /dev/null @@ -1 +0,0 @@ -Upgrade GitHub Actions diff --git a/news/5247.bugfix.rst b/news/5247.bugfix.rst deleted file mode 100644 index 1bef0b9d8a..0000000000 --- a/news/5247.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Removed ``packaging`` library from ``BAD_PACKAGES`` constant to allow it to be installed, which fixes regression from ``pipenv==2022.8.13``. 
diff --git a/pipenv/__version__.py b/pipenv/__version__.py index e0360c2108..d9f15a3cde 100644 --- a/pipenv/__version__.py +++ b/pipenv/__version__.py @@ -2,4 +2,4 @@ # // ) ) / / // ) ) //___) ) // ) ) || / / # //___/ / / / //___/ / // // / / || / / # // / / // ((____ // / / ||/ / -__version__ = "2022.8.14.dev0" +__version__ = "2022.8.14" diff --git a/pipenv/pipenv.1 b/pipenv/pipenv.1 index cedbd1bbd7..553404ddc2 100644 --- a/pipenv/pipenv.1 +++ b/pipenv/pipenv.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "PIPENV" "1" "Aug 13, 2022" "2022.8.13" "pipenv" +.TH "PIPENV" "1" "Aug 14, 2022" "2022.8.14" "pipenv" .SH NAME pipenv \- pipenv Documentation \fI\%\fP\fI\%\fP\fI\%\fP @@ -453,6 +453,12 @@ You might want to set \fBexport PIPENV_VENV_IN_PROJECT=1\fP in your .bashrc/.zsh .sp Congratulations, you now know how to install and use Python packages! ✨ 🍰 ✨ .SS Release and Version History +.SS 2022.8.14 (2022\-08\-14) +.SS Bug Fixes +.INDENT 0.0 +.IP \(bu 2 +Removed \fBpackaging\fP library from \fBBAD_PACKAGES\fP constant to allow it to be installed, which fixes regression from \fBpipenv==2022.8.13\fP\&. \fI\%#5247\fP +.UNINDENT .SS 2022.8.13 (2022\-08\-13) .SS Bug Fixes .INDENT 0.0 @@ -1372,28 +1378,28 @@ Update vendored dependencies and invocations .INDENT 2.0 .IP \(bu 2 Update vendored and patched dependencies -\- Update patches on \fBpiptools\fP, \fBpip\fP, \fBpip\-shims\fP, +\- Update patches on \fBpiptools\fP, \fBpip\fP, \fBpip\-shims\fP, .nf \(ga\(ga .fi tomlkit\(ga .IP \(bu 2 Fix invocations of dependencies -\- Fix custom +\- Fix custom .nf \(ga\(ga .fi InstallCommand\(ga instantiation -\- Update +\- Update .nf \(ga\(ga .fi PackageFinder\(ga usage -\- Fix +\- Fix .nf \(ga\(ga .fi -Bool\(ga stringify attempts from +Bool\(ga stringify attempts from .nf \(ga\(ga .fi From 44328a54621aac4b10ad106461f2eb627843f1c8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 14 Aug 2022 04:06:51 +0000 Subject: [PATCH 055/200] Bumped version. Signed-off-by: github-actions[bot] --- pipenv/__version__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipenv/__version__.py b/pipenv/__version__.py index d9f15a3cde..ceb814e34e 100644 --- a/pipenv/__version__.py +++ b/pipenv/__version__.py @@ -2,4 +2,4 @@ # // ) ) / / // ) ) //___) ) // ) ) || / / # //___/ / / / //___/ / // // / / || / / # // / / // ((____ // / / ||/ / -__version__ = "2022.8.14" +__version__ = "2022.8.15.dev0" From d65e9a3208cfa8f656b2f137ce90e2f503930c1e Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sun, 14 Aug 2022 23:43:36 -0400 Subject: [PATCH 056/200] Remove reference to -r from documentation. (#5258) --- docs/basics.rst | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/basics.rst b/docs/basics.rst index 62ef1c55cc..4d13f1ae56 100644 --- a/docs/basics.rst +++ b/docs/basics.rst @@ -402,9 +402,7 @@ production environments for reproducible builds. .. note:: - If you'd like a ``requirements.txt`` output of the lockfile, run ``$ pipenv lock -r``. - This will not include hashes, however. To get a ``requirements.txt`` - you can also use ``$ pipenv run pip freeze``. + If you'd like a ``requirements.txt`` output of the lockfile, run ``$ pipenv requirements``.
☤ Pipenv and Docker Containers From c6c4ea9c3c7a04fbd0582a0b048d0534d85874ad Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sun, 14 Aug 2022 23:44:19 -0400 Subject: [PATCH 057/200] Issue 5254 (#5255) * Fix for python not defaulting to the virtualenv during a pip install. Add --ignore-installed flag. --- .pre-commit-config.yaml | 2 +- news/5254.bugfix.rst | 1 + pipenv/core.py | 20 +++++++++----------- 3 files changed, 11 insertions(+), 12 deletions(-) create mode 100644 news/5254.bugfix.rst diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 43e22bf32a..e0739d1046 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,4 @@ -exclude: '^(pipenv/patched/|pipenv/vendor/|tests/)' +exclude: '^(pipenv/patched/|pipenv/vendor/|tests/|pipenv/pipenv.1)' repos: - repo: https://github.com/pre-commit/pre-commit-hooks diff --git a/news/5254.bugfix.rst b/news/5254.bugfix.rst new file mode 100644 index 0000000000..330be90dc7 --- /dev/null +++ b/news/5254.bugfix.rst @@ -0,0 +1 @@ +``pip_install`` method was using a different way of finding the python executable than other ``pipenv`` commands, which caused an issue with skipping package installation if it was already installed in site-packages. diff --git a/pipenv/core.py b/pipenv/core.py index f995cda003..237bc74643 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -1,5 +1,3 @@ -from __future__ import annotations - import json as simplejson import logging import os @@ -10,6 +8,7 @@ import warnings from pathlib import Path from posixpath import expandvars +from typing import Dict, List, Optional, Union import dotenv import pipfile @@ -24,6 +23,7 @@ install_req_from_parsed_requirement, ) from pipenv.patched.pip._internal.req.req_file import parse_requirements +from pipenv.project import Project from pipenv.utils.constants import MYPY_RUNNING from pipenv.utils.dependencies import ( convert_deps_to_pip, @@ -50,12 +50,9 @@ ) from pipenv.utils.spinner import create_spinner from pipenv.vendor import click +from pipenv.vendor.requirementslib.models.requirements import Requirement if MYPY_RUNNING: - from typing import Dict, List, Optional, Union - - from pipenv.project import Project - from pipenv.vendor.requirementslib.models.requirements import Requirement TSourceDict = Dict[str, Union[str, bool]] @@ -1211,7 +1208,7 @@ def do_purge(project, bare=False, downloads=False, allow_global=False): click.echo(fix_utf8(f"Found {len(to_remove)} installed package(s), purging...")) command = [ - project_python(project), + project_python(project, system=allow_global), _get_runnable_pip(), "uninstall", "-y", @@ -1529,9 +1526,10 @@ def pip_install( ) pip_command = [ - project._which("python", allow_global=allow_global), + project_python(project, system=allow_global), _get_runnable_pip(), "install", + "--ignore-installed", ] pip_args = get_pip_args( project, @@ -2363,7 +2361,7 @@ def do_uninstall( if package_name in packages_to_remove: with project.environment.activated(): cmd = [ - project_python(project), + project_python(project, system=system), _get_runnable_pip(), "uninstall", package_name, @@ -2674,7 +2672,7 @@ def do_check( safety_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "patched", "safety" ) - _cmd = [project_python(project)] + _cmd = [project_python(project, system=system)] # Run the PEP 508 checker in the virtualenv. cmd = _cmd + [Path(pep508checker_path).as_posix()] c = run_command(cmd, is_verbose=project.s.is_verbose()) @@ -3037,7 +3035,7 @@ def do_clean( ) # Uninstall the package. 
cmd = [ - project_python(project), + project_python(project, system=system), _get_runnable_pip(), "uninstall", apparent_bad_package, From c805cecea9f152a989cca6345f9947e91758a423 Mon Sep 17 00:00:00 2001 From: jgart <47760695+jgarte@users.noreply.github.com> Date: Sun, 14 Aug 2022 22:46:17 -0500 Subject: [PATCH 058/200] Add install instructions for void linux (#5257) --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index d36be97bbc..f9e7cc1e19 100644 --- a/README.md +++ b/README.md @@ -56,6 +56,10 @@ Or, if you\'re using FreeBSD: Or, if you\'re using Gentoo: sudo emerge pipenv + +Or, if you\'re using Void Linux: + + sudo xbps-install -S python3-pipenv Or, if you\'re using Windows: From f8b09a831a9d3fc3885d75d73742c5fb4151939b Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Sun, 14 Aug 2022 23:48:21 -0400 Subject: [PATCH 059/200] Remove function that is no longer used. (#5250) --- pipenv/utils/shell.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/pipenv/utils/shell.py b/pipenv/utils/shell.py index e77be0ab1b..030fb19b2e 100644 --- a/pipenv/utils/shell.py +++ b/pipenv/utils/shell.py @@ -48,16 +48,6 @@ def make_posix(path: str) -> str: return path -def get_pipenv_dist(pkg="pipenv", pipenv_site=None): - from pipenv.resolver import find_site_path - - pipenv_libdir = os.path.dirname(os.path.abspath(__file__)) - if pipenv_site is None: - pipenv_site = os.path.dirname(pipenv_libdir) - pipenv_dist, _ = find_site_path(pkg, site_dir=pipenv_site) - return pipenv_dist - - @contextmanager def chdir(path): """Context manager to change working directories.""" From fef715931b012486e5232d283fc396f1ed34f29f Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Mon, 15 Aug 2022 00:01:03 -0400 Subject: [PATCH 060/200] Release v2022.8.15 --- CHANGELOG.rst | 10 ++++++++++ news/5254.bugfix.rst | 1 - pipenv/__version__.py | 2 +- pipenv/pipenv.1 | 12 ++++++++---- 4 files changed, 19 insertions(+), 6 deletions(-) delete mode 100644 news/5254.bugfix.rst diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2592a191a2..64926cc0e6 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,13 @@ +2022.8.15 (2022-08-15) +====================== + + +Bug Fixes +--------- + +- ``pip_install`` method was using a different way of finding the python executable than other ``pipenv`` commands, which caused an issue with skipping package installation if it was already installed in site-packages. `#5254 `_ + + 2022.8.14 (2022-08-14) ====================== diff --git a/news/5254.bugfix.rst b/news/5254.bugfix.rst deleted file mode 100644 index 330be90dc7..0000000000 --- a/news/5254.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -``pip_install`` method was using a different way of finding the python executable than other ``pipenv`` commands, which caused an issue with skipping package installation if it was already installed in site-packages. diff --git a/pipenv/__version__.py b/pipenv/__version__.py index ceb814e34e..a2674dffb8 100644 --- a/pipenv/__version__.py +++ b/pipenv/__version__.py @@ -2,4 +2,4 @@ # // ) ) / / // ) ) //___) ) // ) ) || / / # //___/ / / / //___/ / // // / / || / / # // / / // ((____ // / / ||/ / -__version__ = "2022.8.15.dev0" +__version__ = "2022.8.15" diff --git a/pipenv/pipenv.1 b/pipenv/pipenv.1 index 553404ddc2..ca16aa4155 100644 --- a/pipenv/pipenv.1 +++ b/pipenv/pipenv.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "PIPENV" "1" "Aug 14, 2022" "2022.8.14" "pipenv" +.TH "PIPENV" "1" "Aug 15, 2022" "2022.8.15" "pipenv" .SH NAME pipenv \- pipenv Documentation \fI\%\fP\fI\%\fP\fI\%\fP @@ -453,6 +453,12 @@ You might want to set \fBexport PIPENV_VENV_IN_PROJECT=1\fP in your .bashrc/.zsh .sp Congratulations, you now know how to install and use Python packages! ✨ 🍰 ✨ .SS Release and Version History +.SS 2022.8.15 (2022\-08\-15) +.SS Bug Fixes +.INDENT 0.0 +.IP \(bu 2 +\fBpip_install\fP method was using a different way of finding the python executable than other \fBpipenv\fP commands, which caused an issue with skipping package installation if it was already installed in site\-packages. \fI\%#5254\fP +.UNINDENT .SS 2022.8.14 (2022\-08\-14) .SS Bug Fixes .INDENT 0.0 @@ -2934,9 +2940,7 @@ production environments for reproducible builds. \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 -If you\(aqd like a \fBrequirements.txt\fP output of the lockfile, run \fB$ pipenv lock \-r\fP\&. -This will not include hashes, however. To get a \fBrequirements.txt\fP -you can also use \fB$ pipenv run pip freeze\fP\&. +If you\(aqd like a \fBrequirements.txt\fP output of the lockfile, run \fB$ pipenv requirements\fP\&. .UNINDENT .UNINDENT .SS ☤ Pipenv and Docker Containers From 146b586378a81e499faa93bf44304cf33ac6542c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 15 Aug 2022 04:03:59 +0000 Subject: [PATCH 061/200] Bumped version. Signed-off-by: github-actions[bot] --- pipenv/__version__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipenv/__version__.py b/pipenv/__version__.py index a2674dffb8..f0c068da3b 100644 --- a/pipenv/__version__.py +++ b/pipenv/__version__.py @@ -2,4 +2,4 @@ # // ) ) / / // ) ) //___) ) // ) ) || / / # //___/ / / / //___/ / // // / / || / / # // / / // ((____ // / / ||/ / -__version__ = "2022.8.15" +__version__ = "2022.8.16.dev0" From 3c9a6292d7159795b37887ae594c26544f16857d Mon Sep 17 00:00:00 2001 From: Jeremy Fleischman Date: Tue, 16 Aug 2022 12:33:34 -0700 Subject: [PATCH 062/200] Actually filter out missing interpreters (#5262) * Actually filter out missing interpreters This fixes https://github.com/pypa/pipenv/issues/5261. Before this change, I would get a "The Python interpreter can't be found" error when running `pipenv install --system` with a python3 but no python. --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- README.md | 2 +- news/5261.bugfix.rst | 1 + pipenv/utils/shell.py | 1 + 4 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 news/5261.bugfix.rst diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 325d14dbc9..f7c77d1354 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -19,7 +19,7 @@ How does this pull request fix your problem? Did you consider any alternatives? ### The checklist * [ ] Associated issue -* [ ] A news fragment in the `news/` directory to describe this fix with the extension `.bugfix`, `.feature`, `.behavior`, `.doc`. `.vendor`. or `.trivial` (this will appear in the release changelog). Use semantic line breaks and name the file after the issue number or the PR #. +* [ ] A news fragment in the `news/` directory to describe this fix with the extension `.bugfix.rst`, `.feature.rst`, `.behavior.rst`, `.doc.rst`. `.vendor.rst`. or `.trivial.rst` (this will appear in the release changelog). Use semantic line breaks and name the file after the issue number or the PR #.